| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer (a real AdamW, unless DeepSpeed brings its own optimizer section)
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, training throws an error when the metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
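The script above only reads `gradient_accumulation_steps` from the DeepSpeed config and checks whether `optimizer` and `scheduler` sections exist; when they do, it builds `DummyOptim`/`DummyScheduler` and lets DeepSpeed own both. A minimal sketch of such a config follows; the field values are illustrative assumptions, not a tuned setup.

```python
# Illustrative DeepSpeed config exercising the DummyOptim/DummyScheduler branches above.
ds_config = {
    "train_micro_batch_size_per_gpu": 16,
    "gradient_accumulation_steps": 1,  # the only field training_function() reads directly
    # the presence of these two sections routes the script to DummyOptim / DummyScheduler
    "optimizer": {"type": "AdamW", "params": {"lr": 2e-5}},
    "scheduler": {"type": "WarmupLR", "params": {"warmup_num_steps": 0}},
}
```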
---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
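The `_LazyModule` indirection above means nothing under `transformers.models.herbert` is actually imported until first attribute access. A minimal usage sketch (assuming a network connection and the `tokenizers` package are available):

```python
from transformers import HerbertTokenizerFast  # resolved lazily on first access

tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
print(tokenizer("Witaj świecie!").input_ids)
```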
---
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
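A small usage sketch (not from the test file) of the `get_activation` factory the tests exercise; each call returns a fresh module instance, which is exactly what the last test relies on.

```python
import torch
from transformers.activations import get_activation

act = get_activation("gelu_new")        # a fresh nn.Module on every call
x = torch.linspace(-3.0, 3.0, steps=7)
print(act(x))                           # GELU-shaped outputs, near 0 for large negative inputs
```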
---
import inspect
import unittest

from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
    from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            backbone_params = []
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        # `assertTrue` here would pass for any truthy label; `assertEqual` actually checks the prediction
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
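A standalone sketch (mine, not from the test file) of the shape contract `create_and_check_model` verifies above, using the same tiny BiT backbone config the tester builds; purely illustrative, and assumes `transformers` with torch support is installed.

```python
import torch
from transformers import ViTHybridConfig, ViTHybridModel

# Mirror the tester's tiny config: 64x64 input, stride-32 backbone => 4 patches + [CLS]
config = ViTHybridConfig(
    image_size=64,
    patch_size=2,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
    backbone_featmap_shape=[1, 16, 4, 4],
    backbone_config={
        "global_padding": "same",
        "layer_type": "bottleneck",
        "depths": [3, 4, 9],
        "out_features": ["stage1", "stage2", "stage3"],
        "embedding_dynamic_padding": True,
        "hidden_sizes": [4, 8, 16, 32],
        "num_groups": 2,
    },
)
model = ViTHybridModel(config).eval()
with torch.no_grad():
    out = model(torch.rand(1, 3, 64, 64))
print(out.last_hidden_state.shape)  # (1, (64 // 32) ** 2 + 1, 32) = (1, 5, 32)
```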
---
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal positional embeddings for a 1d array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Projects a timestep embedding through two dense layers with a SiLU in between."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wrapper module producing sinusoidal timestep embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
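As a quick sanity check of `get_sinusoidal_embeddings`, the sketch below (mine, not part of the file) embeds four timesteps into an 8-dimensional code; it assumes it runs in the same module as the function above.

```python
import jax.numpy as jnp

emb = get_sinusoidal_embeddings(jnp.array([0.0, 1.0, 10.0, 100.0]), embedding_dim=8)
print(emb.shape)  # (4, 8): sin features in the first half, cos features in the second
```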
---
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output, sample, generator=None, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
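A sketch (mine, not from the file) of how `step_correct` and `step_pred` are typically chained in a predictor-corrector sampling loop; `score_model` is a stand-in stub for a trained score network, and the sample shape is an assumption for illustration.

```python
import torch

def score_model(x, t):
    # placeholder for a trained score network; a crude pull toward zero keeps the loop finite
    return -x * 1e-3

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=100)
scheduler.set_sigmas(num_inference_steps=100)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    for _ in range(scheduler.config.correct_steps):
        score = score_model(sample, t)
        sample = scheduler.step_correct(score, sample).prev_sample   # Langevin-style corrector
    score = score_model(sample, t)
    sample = scheduler.step_pred(score, t, sample).prev_sample       # reverse-SDE predictor
```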
---
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
---
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__SCREAMING_SNAKE_CASE = True
except (ImportError, ModuleNotFoundError):
__SCREAMING_SNAKE_CASE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def A_ ( __lowercase ):
re.sub('<n>' , '' , __lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowercase ) )
---
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
---
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
---
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel


KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the original CLAP checkpoint.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert.")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not.")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
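A hypothetical programmatic invocation (the paths are placeholders, not real files); note that `config_path` is accepted but never used by the conversion code above:

```python
convert_clap_checkpoint(
    checkpoint_path="./clap_htsat_tiny.pt",   # assumed local LAION-CLAP checkpoint
    pytorch_dump_folder_path="./clap-hf",     # output directory in transformers format
    config_path=None,                         # unused by this script's code path
    enable_fusion=False,
)
```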
---
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a_ : List[Any] = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
a_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
---
def net_present_value(discount_rate: float, cash_flows: list) -> float:
    """
    Calculate the net present value (NPV) of a series of cash flows: the cash flow in
    period i is discounted by (1 + discount_rate) ** i, so the first flow is undiscounted.
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
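A worked example with made-up numbers: an upfront outlay of 1000 followed by three yearly inflows of 500, discounted at 10%.

```python
npv = net_present_value(0.10, [-1000, 500, 500, 500])
print(npv)  # 243.43 == round(-1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3, 2)
```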
---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
---
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
_lowerCAmelCase = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
_lowerCAmelCase = {
'RUCAIBox/mvp': 10_24,
}
class UpperCamelCase (__snake_case ):
_SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Any = ["""input_ids""", """attention_mask"""]
_SCREAMING_SNAKE_CASE : Optional[Any] = MvpTokenizer
def __init__( self :int , __magic_name__ :Any=None , __magic_name__ :Any=None , __magic_name__ :Dict=None , __magic_name__ :Dict="replace" , __magic_name__ :Any="<s>" , __magic_name__ :Optional[Any]="</s>" , __magic_name__ :Dict="</s>" , __magic_name__ :Optional[Any]="<s>" , __magic_name__ :Any="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :int="<mask>" , __magic_name__ :int=False , __magic_name__ :str=True , **__magic_name__ :Tuple , ) ->str:
super().__init__(
__magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , )
lowercase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
lowercase : List[str] = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) )
lowercase : List[Any] = add_prefix_space
lowercase : List[str] = pre_tok_class(**__magic_name__ )
lowercase : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase : List[Any] = """post_processor"""
lowercase : List[str] = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
if tokenizer_component_instance:
lowercase : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Dict = tuple(state["""sep"""] )
if "cls" in state:
lowercase : Union[str, Any] = tuple(state["""cls"""] )
lowercase : Dict = False
if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
lowercase : str = add_prefix_space
lowercase : Tuple = True
if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets:
lowercase : Dict = trim_offsets
lowercase : Any = True
if changes_to_apply:
lowercase : List[str] = getattr(__magic_name__ , state.pop("""type""" ) )
lowercase : Any = component_class(**__magic_name__ )
setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
@property
def __snake_case ( self :int ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __snake_case ( self :Any , __magic_name__ :List[Any] ) ->Dict:
lowercase : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value
lowercase : List[Any] = value
def __snake_case ( self :Optional[Any] , *__magic_name__ :Optional[int] , **__magic_name__ :Optional[int] ) ->BatchEncoding:
lowercase : Tuple = kwargs.get("""is_split_into_words""" , __magic_name__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )
def __snake_case ( self :Optional[int] , *__magic_name__ :Optional[Any] , **__magic_name__ :Union[str, Any] ) ->BatchEncoding:
lowercase : Tuple = kwargs.get("""is_split_into_words""" , __magic_name__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__magic_name__ , **__magic_name__ )
def __snake_case ( self :List[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ) ->Tuple[str]:
lowercase : int = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
def __snake_case ( self :Dict , __magic_name__ :Optional[int] , __magic_name__ :List[Any]=None ) ->int:
lowercase : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __snake_case ( self :Dict , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ) ->List[int]:
lowercase : Any = [self.sep_token_id]
lowercase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
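Minimal usage sketch (not part of the file), loading the fast tokenizer for the `RUCAIBox/mvp` checkpoint referenced in the URL maps above:

```python
from transformers import MvpTokenizerFast

tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
encoded = tokenizer("Summarize: the quick brown fox.", return_tensors="pt")
print(encoded.input_ids[0][:3])  # starts with the <s> (bos) token id
```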
---
def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs (the XOR of the sign bits is set)."""
    return (num1 ^ num2) < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
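A quick check of the XOR trick: the result is negative exactly when the operands' sign bits differ.

```python
print(different_signs(1, -1))   # True: opposite signs
print(different_signs(-5, -8))  # False: both negative
print(different_signs(7, 3))    # False: both positive
```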
---
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        root_logger = logging.logging.getLogger()
        with CaptureLogger(root_logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)
# no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 374
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary of Value features."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
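A short usage sketch (hypothetical values) showing how encode_example flattens per-language strings and lists into parallel, language-sorted sequences:

# Hypothetical example; zip(*sorted(...)) yields tuples, ordered by language code.
feature = TranslationVariableLanguages(languages=["en", "fr"])
encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
# encoded == {"language": ("en", "fr", "fr"),
#             "translation": ("the cat", "le chat", "la chatte")}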
| 720
|
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two `Transformer2DModel` blocks whose outputs are mixed at inference time."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        """Run both transformers on their condition tokens and mix the residuals."""
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 545
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 458
|
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is what Richard Brent's "optimized" variant of the algorithm does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
UpperCAmelCase_ = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
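Beyond the CLI, a direct-call sketch (the composite 187 = 11 * 17 is chosen for illustration; None is possible if all attempts fail):

# pollard_rho returns a nontrivial factor of its input, or None.
factor = pollard_rho(187)
assert factor in (11, 17) or factor is None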
| 458
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(self, vocab_size=250_880, hidden_size=2_560, num_hidden_layers=36, num_attention_heads=32, intermediate_size=10_240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 700
|
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units via their powers of ten."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
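A worked check of the exponent arithmetic: kilometre has exponent 3 and megametre 6, so the conversion scales the value by 10 ** (3 - 6):

# 4 km = 4000 m = 0.004 Mm
assert length_conversion(4, "kilometer", "megametre") == 4e-3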
| 556
| 0
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_UpperCAmelCase : Dict = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_UpperCAmelCase : Union[str, Any] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference (or a set of references) produced by humans.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_UpperCAmelCase : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 72
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14
| 0
|
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers in place using LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
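A usage sketch; note the implementation above assumes a non-empty list of non-negative integers:

# LSD radix sort on base-10 digits:
print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
# [2, 24, 45, 66, 75, 90, 170, 802]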
| 710
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Any = logging.get_logger(__name__)
a : int = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 527
| 0
|
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__lowerCamelCase = logging.getLogger()
__lowerCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase ( __lowercase ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCamelCase : str ):
os.makedirs(_A , exist_ok=_A )
UpperCAmelCase__ :str = {'''source''': '''What is love ?''', '''target''': '''life'''}
UpperCAmelCase__ :List[Any] = {'''train''': 1_2, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCAmelCase__ :Tuple = '''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(_A , f'''{split}.{field}''' ) , '''w''' ) as f:
f.write(_A )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple = "pytorch" ):
UpperCAmelCase__ :Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ :List[str] = os.path.join(_A , '''output''' )
UpperCAmelCase__ :Optional[int] = os.path.join(_A , '''data''' )
self._create_dummy_data(data_dir=_A )
UpperCAmelCase__ :Dict = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
UpperCAmelCase__ :Dict = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_A , env=self.get_env() )
UpperCAmelCase__ :Union[str, Any] = os.path.join(_A , '''metrics.json''' )
with open(_A ) as f:
UpperCAmelCase__ :Optional[Any] = json.load(_A )
return result
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
UpperCAmelCase__ :Optional[int] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
UpperCAmelCase__ :Union[str, Any] = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
UpperCAmelCase__ :List[Any] = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
def __SCREAMING_SNAKE_CASE ( self : str ):
UpperCAmelCase__ :List[Any] = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 467
|
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in meters between two points on Earth."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
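A usage sketch with illustrative coordinates (values assumed for the example):

# Approximate great-circle distance from San Francisco to Yosemite, in meters:
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(haversine_distance(*SAN_FRANCISCO, *YOSEMITE))  # roughly 254_352 m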
| 485
| 0
|
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
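For reference, a quick call (the argument is a string because it normally arrives from input()):

print(harmonic_series("5"))  # ['1', '1/2', '1/3', '1/4', '1/5']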
| 659
|
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# last string should be ""
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_A: str = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 126
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
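A small usage sketch for the node-sum iterator above:

tree = Node(10)
tree.left = Node(5)
tree.right = Node(-3)
print(next(iter(BinaryTreeNodeSum(tree))))  # 12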
| 110
| 0
|
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary digits, returned as an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
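Two illustrative calls (hex_to_bin is the name reconstructed in the definition above):

print(hex_to_bin("AC"))    # 10101100
print(hex_to_bin("-afc"))  # -101011111100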
| 714
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Order vertices by depth-first-search finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex reachable from ``vert`` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: DFS finish order, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
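A usage sketch on the two module-level test graphs:

print(strongly_connected_components(test_graph_1))
# component membership: {0, 1, 2}, {3}, {4}
print(strongly_connected_components(test_graph_2))
# component membership: {0, 1, 2}, {3, 4, 5}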
| 611
| 0
|
def solution(n: int = 1_000) -> int:
    """Sum the maximal square remainders r_max for 3 <= a <= n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
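For context, the summand 2 * a * ((a - 1) // 2) is the closed form r_max = 2a * floor((a - 1) / 2) for the maximal remainder of (a - 1)**n + (a + 1)**n modulo a**2 (Project Euler 120). A brute-force spot check for a single a:

a = 7
r_max = max(((a - 1) ** n + (a + 1) ** n) % a**2 for n in range(1, 2 * a))
assert r_max == 2 * a * ((a - 1) // 2)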
| 557
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 557
| 1
|
def gnome_sort(lst: list) -> list:
    """Sort a list in place by swapping adjacent out-of-order elements."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
_A : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
_A : List[str] = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
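A quick call; gnome sort is O(n^2) but sorts in place with no extra memory:

print(gnome_sort([34, 2, 10, -9, 2]))  # [-9, 2, 2, 10, 34]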
| 189
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 189
| 1
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    """Convert a PIL image (or list of PIL images) into a normalized tensor batch."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs. Raise error if strength is invalid
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
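# --- Usage sketch (added; not part of the original pipeline) ---
# A minimal, hedged example of driving the pipeline above. The checkpoint id
# "path/to/ddpm-256" is hypothetical: any unconditional 256x256
# UNet2DModel/DDIMScheduler pair should behave the same, since the pipeline
# only noises the input image up to `strength` and then denoises it back.
#
#   from diffusers import UNet2DModel
#
#   unet = UNet2DModel.from_pretrained("path/to/ddpm-256")          # hypothetical path
#   scheduler = DDIMScheduler.from_pretrained("path/to/ddpm-256")   # hypothetical path
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#   init_image = PIL.Image.open("input.png")
#   images, timestep = pipe(image=init_image, strength=0.5, return_dict=False)
#   images[0].save(f"reconstructed_from_t{timestep}.png")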
| 536
|
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """
    Build and simulate a quantum full adder for the three input bits.
    An input of 2 places the corresponding qubit in superposition.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 536
| 1
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans=16,
        max_answer_length=64,
        num_spans_per_passage=4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 16
|
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS
    and prints every permutation of `sequence`.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
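# Note (added): the state-space tree enumerates all n! permutations, so the
# overall runtime is O(n * n!) while the recursion needs only O(n) auxiliary
# space; the shared `current_sequence` and `index_used` lists are mutated in
# place and restored on backtracking rather than copied per branch.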
| 16
| 1
|
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * floor((a - 1) / 2) for a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 479
|
'''simple docstring'''
def triangle_number_generator():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
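# Note (added): count_divisors relies on the standard identity that if
# n = p1^a1 * p2^a2 * ... * pk^ak, then n has (a1 + 1)(a2 + 1)...(ak + 1)
# divisors. The trailing `if n > 1` handles one leftover prime factor larger
# than sqrt(n), which contributes exactly one extra factor of (1 + 1) = 2.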
| 374
| 0
|
def ugly_numbers(n: int) -> int:
    """
    Returns the nth "ugly number" -- a positive integer whose only prime
    factors are 2, 3 or 5: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
| 390
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 390
| 1
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 114
|
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
| 580
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 703
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 182
| 0
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Find scores of each token being the start and end token for an entity.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
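# Note (added): a sketch of the scoring logic above, not an official spec.
# For each query sentence i, the BERT embeddings of the query tokens are
# matched (via matmul) against the embeddings of the special start/end marker
# tokens found in that query's block of support sentences; summing over the
# support tokens and taking a softmax over query positions yields per-token
# probabilities of being an entity start (p_starts) or end (p_ends).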
| 233
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 233
| 1
|
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
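# --- Usage sketch (added; not part of the original file) ---
# Assuming the sibling `hash_table.HashTable` provides `size_table`,
# `charge_factor`, `values`, `_keys` and an `insert_data` method, each bucket
# here holds a deque so colliding keys chain their values instead of probing:
#
#   ht = HashTableWithLinkedList(3, charge_factor=2)
#   for value in (10, 13, 16):  # all hash to the same slot when size_table=3
#       ht.insert_data(value)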
| 717
|
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prev_timestep: Optional[int] = None, generator=None, return_dict: bool = True) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """simple docstring"""
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                """ for the UnCLIPScheduler.""")
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device)
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep)
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    """ for the UnCLIPScheduler.""")
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
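    # The update in step above implements the DDPM posterior mean (Ho et al. 2020, Eq. 7):
    #   mu_t = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
    #        + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t
    # generalized so prev_timestep may skip more than one training step.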
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor) -> torch.FloatTensor:
        """simple docstring"""
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        # forward diffusion: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
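# Minimal usage sketch (illustrative only; `model_output` would come from a denoising
# model such as UnCLIP's decoder, and `sample` starts as Gaussian noise):
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     for t in scheduler.timesteps:
#         sample = scheduler.step(model_output, t, sample).prev_sample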
| 572
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_sew'''] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 588
|
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, )
    decoder_config = MBartConfig(
        is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
        model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True, )
    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("""encoder.model""", """encoder""")
    if "decoder.model" in name:
        name = name.replace("""decoder.model""", """decoder""")
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""")
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""", """embeddings.norm""")
    if name.startswith("""encoder"""):
        if "layers" in name:
            name = """encoder.""" + name
        if "attn.proj" in name:
            name = name.replace("""attn.proj""", """attention.output.dense""")
        if "attn" in name and "mask" not in name:
            name = name.replace("""attn""", """attention.self""")
        if "norm1" in name:
            name = name.replace("""norm1""", """layernorm_before""")
        if "norm2" in name:
            name = name.replace("""norm2""", """layernorm_after""")
        if "mlp.fc1" in name:
            name = name.replace("""mlp.fc1""", """intermediate.dense""")
        if "mlp.fc2" in name:
            name = name.replace("""mlp.fc2""", """output.dense""")
        if name == "encoder.norm.weight":
            name = """encoder.layernorm.weight"""
        if name == "encoder.norm.bias":
            name = """encoder.layernorm.bias"""
    return name
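# The qkv branch in convert_state_dict below assumes timm-style fused attention:
# the original Swin checkpoint stores query/key/value as a single stacked projection
# that has to be split into the three separate tensors the HuggingFace model expects.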
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(""".""")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"""encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # verify results on scanned document
    dataset = load_dataset("""hf-internal-testing/example-documents""")
    image = dataset["""test"""][0]["""image"""].convert("""RGB""")
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1])
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="""pt""").pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        question = """When is the coffee break?"""
        task_prompt = task_prompt.replace("""{user_input}""", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = """<s_rvlcdip>"""
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = """<s_cord>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = """s_cord-v2>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = """<s_zhtrainticket>"""
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = """hello world"""
    else:
        raise ValueError("""Model name not supported""")
    prompt_tensor = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="""pt""")[
        """input_ids"""
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1E-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1E-2)
    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1E-3)
    print("""Looks ok!""")
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and processor to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("""nielsr/""" + model_name.split("""/""")[-1], commit_message="""Update model""")
        processor.push_to_hub("""nielsr/""" + model_name.split("""/""")[-1], commit_message="""Update model""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 588
| 1
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install \"sacrebleu>=1.4.12\"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
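    # Minimal usage sketch (mirrors the docstring examples above):
    #     chrf = datasets.load_metric("chrf")
    #     chrf.compute(predictions=["hello there"], references=[["hello there"]], word_order=2)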
| 715
|
import math
def sieve(n):
    '''simple docstring'''
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
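# Complexity note: the segmented sieve keeps per-segment memory at O(sqrt(n)).
# `in_prime` holds the base primes up to sqrt(n), and each window [low, high] is
# marked using only them, so total work stays O(n log log n) as in the classic sieve.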
print(sieve(10**6))
| 62
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """simple docstring"""
    if "cls_token" in name:
        name = name.replace('''cls_token''', '''vit.embeddings.cls_token''')
    if "mask_token" in name:
        name = name.replace('''mask_token''', '''decoder.mask_token''')
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''', '''decoder.decoder_pos_embed''')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''', '''vit.embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''vit.embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''vit.embeddings.norm''')
    if "decoder_blocks" in name:
        name = name.replace('''decoder_blocks''', '''decoder.decoder_layers''')
    if "blocks" in name:
        name = name.replace('''blocks''', '''vit.encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''', '''decoder.decoder_embed''')
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''', '''decoder.decoder_norm''')
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''', '''decoder.decoder_pred''')
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('''norm.weight''', '''vit.layernorm.weight''')
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('''norm.bias''', '''vit.layernorm.bias''')
    return name
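# Keys not covered by the renames above -- the fused qkv projections -- are handled
# in convert_state_dict below, which splits them into separate query/key/value tensors
# for either the encoder (vit.encoder.layer.) or the decoder (decoder.decoder_layers.).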
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = '''decoder.decoder_layers.'''
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = '''vit.encoder.layer.'''
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """simple docstring"""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''model''']
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors='''pt''')
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 34
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 636
| 0
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 711
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles(path: Path, articles: list):
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, F'''{split}.source'''), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, F'''{split}.target'''), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    """simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer, data_dir=tmp_dir, type_path='train', max_source_length=max_src_len, max_target_length=max_tgt_len, src_lang=src_lang, tgt_lang=tgt_lang, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
            batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer, data_dir=tmp_dir, type_path='train', max_source_length=20, max_target_length=trunc_target, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
    def test_pack_dataset(self):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('train.source').open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('train.source').open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def test_dynamic_batch_size(self):
        '''simple docstring'''
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.prod(batch['input_ids'].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(bs)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f'''too many tokens in {len(failures)} batches''')
    def test_sortish_sampler_reduces_padding(self):
        '''simple docstring'''
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)
        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader, k='input_ids'):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl, k='labels')) < sum(count_pad_tokens(naive_dl, k='labels'))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
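        # Why this works: sortish sampling groups examples of similar length into the
        # same batch, so batches need less padding than with the naive loader; the
        # asserts above verify exactly that, for both labels and input_ids.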
    def _get_dataset(self, n_obs=1000, max_len=128):
        '''simple docstring'''
        if os.getenv('USE_REAL_DATA', False):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath('train.len').exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer, data_dir=data_dir, type_path='train', max_source_length=max_len, max_target_length=max_len, n_obs=n_obs, )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        '''simple docstring'''
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8, src_lang='EN', tgt_lang='FR', )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8, )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 652
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''AST does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''')
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''')
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(torch_device)
        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
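        # Note: 527 is the number of AudioSet classes predicted by this checkpoint;
        # atol=1e-4 leaves headroom for nondeterministic kernel differences across devices.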
| 35
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dict(self):
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data)
    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ['''col_1''', '''col_2'''])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])
    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)
    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {'''col_1''': 1})
        self.assertDictEqual(dset[1], {'''col_1''': None})  # NB: first record is used for columns
    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features['''col_1'''], Sequence(Value('''int64''')))
    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 35
| 1
|
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    '''Timeout''',
    '''BaseFileLock''',
    '''WindowsFileLock''',
    '''UnixFileLock''',
    '''SoftFileLock''',
    '''FileLock''',
]
__version__ = '''3.0.12'''
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """simple docstring"""
    def __init__(self, lock_file):
        """simple docstring"""
        self.lock_file = lock_file
        return None
    def __str__(self):
        """simple docstring"""
        temp = f'The file lock \'{self.lock_file}\' could not be acquired.'
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""
    def __init__(self, lock):
        """simple docstring"""
        self.lock = lock
        return None
def __enter__( self ):
"""simple docstring"""
return self.lock
def __exit__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
self.lock.release()
return None
class BaseFileLock:
    """simple docstring"""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        """simple docstring"""
        return self._lock_file
    @property
    def timeout(self):
        """simple docstring"""
        return self._timeout
    @timeout.setter
    def timeout(self, value):
        """simple docstring"""
        self._timeout = float(value)
        return None
    def _acquire(self):
        """simple docstring"""
        raise NotImplementedError()
    def _release(self):
        """simple docstring"""
        raise NotImplementedError()
    @property
    def is_locked(self):
        """simple docstring"""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """simple docstring"""
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
                        self._acquire()
                if self.is_locked:
                    logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...')
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
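    # Typical usage (sketch): either `with lock:` via __enter__/__exit__ below, or
    # `with lock.acquire(timeout=10):` -- the returned _Acquire_ReturnProxy releases
    # the lock when the `with` block exits, so nested acquisitions stay balanced.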
    def release(self, force=False):
        """simple docstring"""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}')
        return None
def __enter__( self ):
"""simple docstring"""
self.acquire()
return self
def __exit__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
self.release()
return None
    def __del__(self):
        """simple docstring"""
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        """simple docstring"""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname, new_filename)
        else:
            return path
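    # Design note: keeping a prefix of the original name plus the hash suffix preserves
    # readability while guaranteeing the lock file name fits the filesystem's limit.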
class WindowsFileLock(BaseFileLock):
    """simple docstring"""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file)
    def _acquire(self):
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None
    def _release(self):
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """simple docstring"""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
    def _acquire(self):
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """simple docstring"""
    def _acquire(self):
        """simple docstring"""
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        """simple docstring"""
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn('''only soft file lock is available''')
| 154
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool( PipelineTool ):
    """simple docstring"""
    default_checkpoint = """dandelin/vilt-b32-finetuned-vqa"""
    description = (
        """This is a tool that answers a question about an image. It takes an input named `image` which should be the """
        """image containing the information, as well as a `question` which should be the question in English. It """
        """returns a text that is the answer to the question."""
    )
    name = """image_qa"""
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["""image""", """text"""]
    outputs = ["""text"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )
    def encode( self , image , question ):
        """simple docstring"""
        return self.pre_processor(image , question , return_tensors='pt' )
    def forward( self , inputs ):
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self , outputs ):
        """simple docstring"""
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
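# Usage sketch: PipelineTool's __call__ chains encode -> forward -> decode, so one
# call answers a question about a local image. The file name and question below
# are placeholders, and downloading the checkpoint is assumed to be possible.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageQuestionAnsweringTool()
    image = Image.open("photo.jpg")
    print(tool(image, "How many dogs are in the picture?"))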
| 154
| 1
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('''~'''), '''.cache''')
CACHE_DIR = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _get_ckpt_path( model_type , use_small=False ):
    '''simple docstring'''
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
def _download( from_hf_hub , file_name ):
    '''simple docstring'''
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_hub , filename=file_name , local_dir=CACHE_DIR )
def _load_model( ckpt_path , device , use_small=False , model_type="text" ):
    '''simple docstring'''
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = F'''{model_type}_small''' if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
        _download(model_info['repo_id'] , model_info['file_name'] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head' )
    model_args['hidden_size'] = model_args.pop('n_embd' )
    model_args['num_layers'] = model_args.pop('n_layer' )
    model_config = ConfigClass(**checkpoint['model_args'] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(extra_keys ) != 0:
        raise ValueError(F'''extra keys found: {extra_keys}''' )
    if len(missing_keys ) != 0:
        raise ValueError(F'''missing keys: {missing_keys}''' )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(F'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(val_loss , 3 )} loss''' )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def load_model( pytorch_dump_folder_path , use_small=False , model_type="text" ):
    '''simple docstring'''
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , 'cpu' , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError('initial and new outputs are not equal' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    '''simple docstring'''
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , 'config.json' ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , 'config.json' ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , 'config.json' ) )
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
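# Isolated sketch of the checkpoint key rewriting performed inside _load_model:
# strip the torch.compile prefix, then map GPT-style layer names to the HF Bark
# names via new_layer_name_dict. The toy state dict is for illustration only.
def _demo_rename_keys():
    state_dict = {"_orig_mod.transformer.h.0.attn.c_attn.weight": "w"}
    unwanted_prefix = "_orig_mod."
    for k in list(state_dict):
        if k.startswith(unwanted_prefix):
            new_k = k[len(unwanted_prefix):]
            for old, new in new_layer_name_dict.items():
                new_k = new_k.replace(old, new)
            state_dict[new_k] = state_dict.pop(k)
    return state_dict  # {'layers.0.attn.att_proj.weight': 'w'}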
| 7
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs ):
        if text is None and visual_prompt is None and images is None:
            raise ValueError('You have to specify either text, visual prompt or images.' )
        if text is not None and visual_prompt is not None:
            raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                'pixel_values': image_features.pixel_values,
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
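# Usage sketch: loading a pretrained checkpoint wires up the ViT image processor
# and CLIP tokenizer declared above. The checkpoint name "CIDAS/clipseg-rd64-refined"
# is an assumption for illustration; network access is assumed.
if __name__ == "__main__":
    from PIL import Image

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.open("scene.jpg")
    inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
    print(inputs.keys())  # input_ids, attention_mask, pixel_values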
| 7
| 1
|
'''simple docstring'''
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(b"Hello server!" )
    with open("Received_file" , "wb" ) as out_file:
        print("File opened" )
        print("Receiving data..." )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print("Successfully received the file" )
    sock.close()
    print("Connection closed" )
if __name__ == "__main__":
    main()
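# A matching minimal server sketch for the client above: it listens on the same
# port, consumes the client's greeting, and streams a file back in 1024-byte
# chunks. The filename "File_to_send" is a placeholder; run this on the host side.
def serve_file() -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connection from {addr}")
    conn.recv(1024)  # consume "Hello server!"
    with open("File_to_send", "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()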
| 714
|
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division**: Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall( datasets.Metric ):
    """simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 608
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_0003
PYTHON_CODE = 5_0002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='base' , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer( self ):
        '''simple docstring'''
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='base' , keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4 , end)]
        self.assertListEqual(language_tokens , ['__java__', '__python__', '__en_XX__', '<mask>'])
        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True) , code , )
    def test_full_multi_tokenizer( self ):
        '''simple docstring'''
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='multi' , keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7 , end)]
        self.assertListEqual(
            language_tokens , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'])
        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True) , code , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    checkpoint_name = """uclanlp/plbart-python-en_XX"""
    src_text = [
        """def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
        """def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
    ]
    tgt_text = [
        """Returns the maximum value of a b c.""",
        """Sums the values of a b c.""",
    ]
    expected_src_tokens = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX')
        cls.pad_token_id = 1
        return cls
    def test_language_codes( self ):
        '''simple docstring'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0_0_0_1)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0_0_0_2)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0_0_0_3)
    def test_python_en_tokenizer_batch_encode_plus( self ):
        '''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids)
    def test_python_en_tokenizer_decode_ignores_language_codes( self ):
        '''simple docstring'''
        self.assertIn(EN_CODE , self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
        self.assertEqual(result , expected_english)
        self.assertNotIn(self.tokenizer.eos_token , result)
    def test_python_en_tokenizer_truncation( self ):
        '''simple docstring'''
        src_text = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 2_0]
        self.assertIsInstance(src_text[0] , str)
        desired_max_length = 1_0
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True).input_ids[0]
        self.assertEqual(ids[-2] , 2)
        self.assertEqual(ids[-1] , PYTHON_CODE)
        self.assertEqual(len(ids) , desired_max_length)
    def test_mask_token( self ):
        '''simple docstring'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__']) , [5_0_0_0_4, 5_0_0_0_1])
    def test_special_tokens_unaffected_by_save_load( self ):
        '''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens)
@require_torch
    def test_batch_fairseq_parity( self ):
        '''simple docstring'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
    def test_python_en_tokenizer_prepare_batch( self ):
        '''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens) , return_tensors='pt' , )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
        self.assertIsInstance(batch , BatchEncoding)
        self.assertEqual((2, 2_6) , batch.input_ids.shape)
        self.assertEqual((2, 2_6) , batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result)
        self.assertEqual(2 , batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [])
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
    def test_seq2seq_max_length( self ):
        '''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=1_0 , return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels , self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1] , 3)
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0)
@require_torch
    def test_tokenizer_translation( self ):
        '''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java')
        self.assertEqual(
            nested_simplify(inputs) , {
# A, test, EOS, en_XX
'input_ids': [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_0_0_0_1,
} , )
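# Usage sketch of the translation setup these tests exercise: tokenizing Python
# source with the python->en_XX checkpoint appends the suffix tokens
# [eos, __python__] asserted above. Downloading the checkpoint is assumed.
if __name__ == "__main__":
    tokenizer = PLBartTokenizer.from_pretrained(
        "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
    )
    batch = tokenizer("def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])", return_tensors="pt")
    print(batch.input_ids[0][-2:])  # tensor([    2, 50002])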
| 474
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates( self , text , **kwargs ):
        '''simple docstring'''
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair' , None )
        return_tensors = kwargs.pop('return_tensors' , None )
        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get('input_ids' )
            encoded_attention_mask = encoded_candidates.get('attention_mask' )
            encoded_token_type_ids = encoded_candidates.get('token_type_ids' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
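# Usage sketch for batch_encode_candidates above: each element of `text` is a
# list of candidate strings, padded to max_length so the result stacks into
# (batch, num_candidates, seq_len). Downloading the checkpoint is assumed.
if __name__ == "__main__":
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    candidates = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
    enc = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
    print(enc.input_ids.shape)  # torch.Size([2, 2, 10])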
| 42
| 0
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number: int ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator( number: int ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark( ) -> None:
    '''simple docstring'''
    def do_benchmark(number: int ) -> None:
        setup = "import __main__ as z"
        print(F'''Benchmark when {number = }:''' )
        print(F'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
        timing = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=setup )
        print(F'''timeit() runs in {timing} seconds''' )
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
        timing = timeit(
            F"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=setup , )
        print(F'''timeit() runs in {timing} seconds''' )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
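# Note: the standard library provides the same popcount. bin(n).count("1") works
# on any Python 3, and int.bit_count() (Python >= 3.10) is the idiomatic, fastest
# spelling of the two functions benchmarked above. Quick cross-check:
assert bin(25).count("1") == get_set_bits_count_using_brian_kernighans_algorithm(25)
# assert (25).bit_count() == 3  # Python >= 3.10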
| 135
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
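# Note on the _LazyModule pattern above: importing the package is cheap, and the
# torch-backed classes are only resolved on first attribute access. A consumer
# pays the torch import cost only when it actually touches a model, e.g.:
#
#     from transformers.models.wavlm import WavLMConfig  # lazy until accessed
#     config = WavLMConfig()  # configuration objects never require torch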
| 135
| 1
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 311
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip_text_model"""
    def __init__( self , vocab_size=3_0524 , hidden_size=768 , encoder_hidden_size=768 , intermediate_size=3072 , projection_dim=768 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=512 , hidden_act="gelu" , layer_norm_eps=1e-12 , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , bos_token_id=3_0522 , eos_token_id=2 , pad_token_id=0 , sep_token_id=102 , is_decoder=True , use_cache=True , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type" ) == "blip":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip_vision_model"""
    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , image_size=384 , patch_size=16 , hidden_act="gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=1e-10 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type" ) == "blip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """blip"""
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , image_text_hidden_size=256 , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
        self.text_config = BlipTextConfig(**text_config )
        self.vision_config = BlipVisionConfig(**vision_config )
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
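# Usage sketch: composing the configs defined above via from_text_vision_configs,
# mirroring how a full BLIP model config is assembled from its sub-configs.
if __name__ == "__main__":
    text_config = BlipTextConfig()
    vision_config = BlipVisionConfig()
    config = BlipConfig.from_text_vision_configs(text_config, vision_config)
    print(config.projection_dim)  # 512 by default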
| 311
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = VivitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing, '''image_std''' ) )
        self.assertTrue(hasattr(image_processing, '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing, '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing, '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing, '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
    def test_call_pil( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False )
        for video in video_inputs:
            self.assertIsInstance(video, list )
            self.assertIsInstance(video[0], Image.Image )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
    def test_call_numpy( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
        for video in video_inputs:
            self.assertIsInstance(video, list )
            self.assertIsInstance(video[0], np.ndarray )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
    def test_call_pytorch( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
        for video in video_inputs:
            self.assertIsInstance(video, list )
            self.assertIsInstance(video[0], torch.Tensor )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
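# Usage sketch of the preprocessing these tests exercise: a video is a list of
# frames, and the processor returns pixel_values shaped
# (batch, num_frames, channels, height, width). Sizes here are toy values.
if __name__ == "__main__":
    processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    video = [np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8) for _ in range(10)]
    inputs = processor(video, return_tensors="pt")
    print(inputs.pixel_values.shape)  # torch.Size([1, 10, 3, 18, 18])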
| 215
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # the standard COCO validation image of two cats, used across conversion scripts
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
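# Example invocation (the script file name below is hypothetical):
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16_converted
# Note that `parser.set_defaults(base_model=True)` makes the base-model
# conversion path (no classification head) the default behaviour.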
| 215
| 1
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 81
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    # branch 1: exclude the current element
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: include the current element, then backtrack
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
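# The exclude-first recursion fixes the print order. For example,
# generate_all_subsequences([1, 2]) prints:
#   []
#   [2]
#   [1]
#   [1, 2]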
| 81
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
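# Usage sketch (hypothetical smoke test, not part of the library): the config
# carries the model hyperparameters and the ONNX config describes the
# exported graph's dynamic axes.
#   cfg = TableTransformerConfig()
#   assert cfg.num_attention_heads == cfg.encoder_attention_heads
#   onnx_cfg = TableTransformerOnnxConfig(cfg)
#   onnx_cfg.inputs  # OrderedDict with pixel_values / pixel_mask axis names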
| 719
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
_SCREAMING_SNAKE_CASE =[directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(_a ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
_SCREAMING_SNAKE_CASE =(
Path(_a ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
_SCREAMING_SNAKE_CASE =path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(_a ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
_SCREAMING_SNAKE_CASE =json.load(_a )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=_a , extra_context=_a , )
_SCREAMING_SNAKE_CASE =[directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
_SCREAMING_SNAKE_CASE =json.load(_a )
_SCREAMING_SNAKE_CASE =configuration['''lowercase_modelname''']
_SCREAMING_SNAKE_CASE =configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"{directory}/configuration.json" )
_SCREAMING_SNAKE_CASE ='''PyTorch''' in generate_tensorflow_pytorch_and_flax
_SCREAMING_SNAKE_CASE ='''TensorFlow''' in generate_tensorflow_pytorch_and_flax
_SCREAMING_SNAKE_CASE ='''Flax''' in generate_tensorflow_pytorch_and_flax
_SCREAMING_SNAKE_CASE =f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(_a , exist_ok=_a )
os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=_a )
# Tests require submodules as they have parent imports
with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ):
pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
def remove_copy_lines(_a : int ):
with open(_a , '''r''' ) as f:
_SCREAMING_SNAKE_CASE =f.readlines()
with open(_a , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(_a )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(_a : str , _a : str , _a : List[str] ):
# Create temp file
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =mkstemp()
_SCREAMING_SNAKE_CASE =False
with fdopen(_a , '''w''' ) as new_file:
with open(_a ) as old_file:
for line in old_file:
new_file.write(_a )
if line_to_copy_below in line:
_SCREAMING_SNAKE_CASE =True
for line_to_copy in lines_to_copy:
new_file.write(_a )
if not line_found:
raise ValueError(f"Line {line_to_copy_below} was not found in file." )
# Copy the file permissions from the old file to the new file
copymode(_a , _a )
# Remove original file
remove(_a )
# Move new file
move(_a , _a )
def skip_units(_a : str ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(_a : Any ):
with open(_a ) as datafile:
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
_SCREAMING_SNAKE_CASE =line.split('''"''' )[1]
_SCREAMING_SNAKE_CASE =skip_units(_a )
elif "# Below: " in line and "##" not in line:
_SCREAMING_SNAKE_CASE =line.split('''"''' )[1]
_SCREAMING_SNAKE_CASE =skip_units(_a )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(_a , _a , _a )
_SCREAMING_SNAKE_CASE =[]
elif "# Replace with" in line and "##" not in line:
_SCREAMING_SNAKE_CASE =[]
elif "##" not in line:
lines_to_copy.append(_a )
remove(_a )
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" )
os.rmdir(_a )
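# Example invocation of the (deprecated) command wired up above:
#   transformers-cli add-new-model
# or, for CI-style runs driven by a prepared configuration file:
#   transformers-cli add-new-model --testing --testing_file ./config.json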
| 191
| 0
|
'''simple docstring'''
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ("j" is folded into "i")."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-indexed (row, column) coordinates of `letter`."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-indexed (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index] = numbers[0]
            first_step[letter_index + len(message)] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
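# Round-trip sanity check (a minimal sketch):
#   cipher = BifidCipher()
#   assert cipher.decode(cipher.encode("testmessage")) == "testmessage"
# Because "j" is folded into "i" on encode, a message containing "j"
# round-trips to its "i"-substituted form.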
| 215
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
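# Worked example: 1 mol of an ideal gas at 300 K confined to 0.01 m^3 exerts
#   P = n*R*T / V = 1 * 8.314462 * 300 / 0.01 ≈ 249433.86 Pa (about 2.46 atm),
# and the same gas held at 101325 Pa occupies
#   V = n*R*T / P = 1 * 8.314462 * 300 / 101325 ≈ 0.0246 m^3.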
| 215
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
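# The integration tests above only run when slow tests are enabled, e.g.
# (a sketch, assuming the usual diffusers test convention):
#   RUN_SLOW=1 python -m pytest -k "AltDiffusionPipelineIntegrationTests"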
| 252
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252
| 1
|
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            # subtractive notation, e.g. the "I" in "IV"
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 3
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class __lowercase ( __snake_case ):
_A = ["pixel_values"]
def __init__(self : List[str] , snake_case : bool = True , snake_case : Optional[Dict[str, int]] = None , snake_case : PILImageResampling = PILImageResampling.BILINEAR , snake_case : bool = True , snake_case : Dict[str, int] = None , snake_case : bool = True , snake_case : Union[int, float] = 1 / 255 , snake_case : bool = True , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[float, List[float]]] = None , **snake_case : List[Any] , ) -> None:
super().__init__(**snake_case )
_lowercase : List[str] = size if size is not None else {"shortest_edge": 256}
_lowercase : Union[str, Any] = get_size_dict(snake_case , default_to_square=snake_case )
_lowercase : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowercase : Dict = get_size_dict(snake_case , param_name="crop_size" )
_lowercase : List[Any] = do_resize
_lowercase : Optional[Any] = size
_lowercase : Tuple = resample
_lowercase : Tuple = do_center_crop
_lowercase : Any = crop_size
_lowercase : str = do_rescale
_lowercase : int = rescale_factor
_lowercase : List[Any] = do_normalize
_lowercase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a(self : Tuple , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : PILImageResampling = PILImageResampling.BICUBIC , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : int , ) -> np.ndarray:
_lowercase : Union[str, Any] = get_size_dict(snake_case , default_to_square=snake_case )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_lowercase : int = get_resize_output_image_size(snake_case , size=size["shortest_edge"] , default_to_square=snake_case )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def _a(self : str , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Union[str, Any] , ) -> np.ndarray:
_lowercase : Union[str, Any] = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(snake_case , size=(size["height"], size["width"]) , data_format=snake_case , **snake_case )
def _a(self : Union[str, Any] , snake_case : np.ndarray , snake_case : float , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : List[Any] ) -> np.ndarray:
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def _a(self : int , snake_case : np.ndarray , snake_case : Union[float, List[float]] , snake_case : Union[float, List[float]] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Tuple , ) -> np.ndarray:
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def _a(self : Optional[int] , snake_case : ImageInput , snake_case : Optional[bool] = None , snake_case : Dict[str, int] = None , snake_case : PILImageResampling = None , snake_case : bool = None , snake_case : Dict[str, int] = None , snake_case : Optional[bool] = None , snake_case : Optional[float] = None , snake_case : Optional[bool] = None , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[str, TensorType]] = None , snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case : Tuple , ) -> Union[str, Any]:
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : Optional[int] = get_size_dict(snake_case , default_to_square=snake_case )
_lowercase : Tuple = resample if resample is not None else self.resample
_lowercase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : int = crop_size if crop_size is not None else self.crop_size
_lowercase : Dict = get_size_dict(snake_case , param_name="crop_size" )
_lowercase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Dict = image_mean if image_mean is not None else self.image_mean
_lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
_lowercase : int = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowercase : Tuple = [to_numpy_array(snake_case ) for image in images]
if do_resize:
_lowercase : List[str] = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
_lowercase : List[str] = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
_lowercase : Any = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
_lowercase : Optional[Any] = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
_lowercase : Any = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
_lowercase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
def _a(self : Dict , snake_case : List[str] , snake_case : List[Tuple] = None ) -> Optional[Any]:
_lowercase : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case ) != len(snake_case ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(snake_case ):
_lowercase : Dict = target_sizes.numpy()
_lowercase : Tuple = []
for idx in range(len(snake_case ) ):
_lowercase : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=snake_case )
_lowercase : int = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case )
else:
_lowercase : Optional[int] = logits.argmax(dim=1 )
_lowercase : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
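# Typical preprocessing flow for the processor above (a sketch based on its
# defaults, not a verbatim doc example): images are resized so the shortest
# edge is 256, center-cropped to 224x224, rescaled to [0, 1] and normalized.
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape  ->  torch.Size([1, 3, 224, 224])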
| 461
| 0
|
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` at `position` via dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4
print(differentiate(f, 9, 2))
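    # For f(y) = y**6 the second derivative is 30*y**4, so the call above
    # prints 30 * 9**4 = 196830.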
| 214
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args)
| 214
| 1
|
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Return the dominant eigenvalue of `input_matrix` and its eigenvector."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
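# Minimal example: for the diagonal matrix [[2, 0], [0, 1]] with starting
# vector [1, 1], power_iteration converges to the dominant eigenvalue 2.0
# with eigenvector [1, 0] (up to sign).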
| 647
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
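# The test shells out to `xla_spawn.py`, the same launcher used for manual
# TPU runs, e.g. (a sketch with a hypothetical script name):
#   python xla_spawn.py --num_cores 8 your_training_script.py --your-args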
| 647
| 1
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` over `iterable` with `num_proc` workers."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Configure which joblib backend the parallel map helpers should use."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
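# Usage sketch (requires `joblibspark` when the "spark" backend is chosen):
#   with parallel_backend("spark"):
#       ...  # map-style work inside the block is dispatched through joblib
# Outside the block, the default multiprocessing pool is used again.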
| 636
|
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with `accuracy` terms of the Maclaurin series."""
    # Wrap the angle into [0, 360) degrees so the series converges quickly
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
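# Note: at runtime (when TYPE_CHECKING is False) the module replaces itself with a _LazyModule,
# so `from transformers.models.mask2former import Mask2FormerConfig` resolves names lazily and
# the heavy torch/vision submodules are only imported on first attribute access.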
import inspect
import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_MAPPING,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
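# The integration tests above are gated behind @slow, so per the usual transformers convention
# they only run when the RUN_SLOW environment variable is set, e.g. (test path assumed):
#   RUN_SLOW=1 pytest tests/models/beit/test_modeling_beit.py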
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
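# The attribute_map lets callers use the canonical config names; a quick illustration:
#   config = DistilBertConfig(n_layers=3, dim=256)
#   config.num_hidden_layers  # -> 3, resolved through attribute_map
#   config.hidden_size        # -> 256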
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class __lowercase ( __lowerCamelCase ):
def __init__( self : Optional[Any] ,*A : Tuple ,**A : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" ,A ,)
super().__init__(*A ,**A )
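# Downstream code migrating at its own pace can silence this shim with the standard warnings
# machinery (sketch; the `module` filter below is a regex applied by the warnings framework):
#   import warnings
#   warnings.filterwarnings("ignore", category=FutureWarning, module="transformers")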
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in non-decreasing order (trial division)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
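# Examples:
#   prime_factors(360)  -> [2, 2, 2, 3, 3, 5]
#   prime_factors(97)   -> [97]   (97 is prime)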
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def __UpperCAmelCase ( __UpperCamelCase="ro" , __UpperCamelCase="en" , __UpperCamelCase="wmt16" , __UpperCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__lowercase : List[str] = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
__lowercase : Tuple = datasets.load_dataset(__UpperCamelCase , __UpperCamelCase )
if save_dir is None:
__lowercase : Any = f"""{dataset}-{pair}"""
__lowercase : Optional[Any] = Path(__UpperCamelCase )
save_dir.mkdir(exist_ok=__UpperCamelCase )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__lowercase : int = '''val''' if split == '''validation''' else split
__lowercase : List[str] = save_dir.joinpath(f"""{fn}.source""" )
__lowercase : int = save_dir.joinpath(f"""{fn}.target""" )
__lowercase : Union[str, Any] = src_path.open('''w+''' )
__lowercase : Dict = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__lowercase : Any = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
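# CLI usage via fire (script filename assumed; flags map 1:1 onto the function arguments):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# This writes train/val/test .source and .target files under the save directory.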
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[Any] = len(__UpperCamelCase )
__lowercase : Optional[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__lowercase : Optional[Any] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__lowercase : Tuple = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__lowercase : Optional[Any] = subset[i - 1][j]
if arr[i - 1] <= j:
__lowercase : Dict = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
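# Examples:
#   is_sum_subset([3, 34, 4, 12, 5, 2], 9)   -> True   (4 + 5)
#   is_sum_subset([3, 34, 4, 12, 5, 2], 30)  -> False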
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"

_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def A_ (__a ):
'''simple docstring'''
return x + 2
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
A_ = "x = 3"
A_ = {}
A_ = evaluate(_snake_case , {} , state=_snake_case )
assert result == 3
self.assertDictEqual(_snake_case , {"x": 3} )
A_ = "x = y"
A_ = {"y": 5}
A_ = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_snake_case , {"x": 5, "y": 5} )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
A_ = "y = add_two(x)"
A_ = {"x": 3}
A_ = evaluate(_snake_case , {"add_two": add_two} , state=_snake_case )
assert result == 5
self.assertDictEqual(_snake_case , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
A_ = evaluate(_snake_case , {} , state=_snake_case )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCamelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
A_ = "x = 3"
A_ = {}
A_ = evaluate(_snake_case , {} , state=_snake_case )
assert result == 3
self.assertDictEqual(_snake_case , {"x": 3} )
def lowerCamelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
A_ = "test_dict = {'x': x, 'y': add_two(x)}"
A_ = {"x": 3}
A_ = evaluate(_snake_case , {"add_two": add_two} , state=_snake_case )
self.assertDictEqual(_snake_case , {"x": 3, "y": 5} )
self.assertDictEqual(_snake_case , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
A_ = "x = 3\ny = 5"
A_ = {}
A_ = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_snake_case , {"x": 3, "y": 5} )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
A_ = "text = f'This is x: {x}.'"
A_ = {"x": 3}
A_ = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_snake_case , {"x": 3, "text": "This is x: 3."} )
def lowerCamelCase__ ( self : str ) -> Any:
"""simple docstring"""
A_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
A_ = {"x": 3}
A_ = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_snake_case , {"x": 3, "y": 2} )
A_ = {"x": 8}
A_ = evaluate(_snake_case , {} , state=_snake_case )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_snake_case , {"x": 8, "y": 5} )
def lowerCamelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
A_ = "test_list = [x, add_two(x)]"
A_ = {"x": 3}
A_ = evaluate(_snake_case , {"add_two": add_two} , state=_snake_case )
self.assertListEqual(_snake_case , [3, 5] )
self.assertDictEqual(_snake_case , {"x": 3, "test_list": [3, 5]} )
def lowerCamelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
A_ = "y = x"
A_ = {"x": 3}
A_ = evaluate(_snake_case , {} , state=_snake_case )
assert result == 3
self.assertDictEqual(_snake_case , {"x": 3, "y": 3} )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
A_ = "test_list = [x, add_two(x)]\ntest_list[1]"
A_ = {"x": 3}
A_ = evaluate(_snake_case , {"add_two": add_two} , state=_snake_case )
assert result == 5
self.assertDictEqual(_snake_case , {"x": 3, "test_list": [3, 5]} )
A_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
A_ = {"x": 3}
A_ = evaluate(_snake_case , {"add_two": add_two} , state=_snake_case )
assert result == 5
self.assertDictEqual(_snake_case , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCamelCase__ ( self : int ) -> Dict:
"""simple docstring"""
A_ = "x = 0\nfor i in range(3):\n x = i"
A_ = {}
A_ = evaluate(_snake_case , {"range": range} , state=_snake_case )
assert result == 2
self.assertDictEqual(_snake_case , {"x": 2, "i": 2} )
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
def __UpperCamelCase ( self , lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__a = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Any:
'''simple docstring'''
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate to a value that is not even reached by the input.
                # In that case we don't want to truncate; there seems to be no better way to catch this exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
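# A minimal usage sketch (illustrative only; the checkpoint name below is an
# assumption, not something this module defines):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   result = classifier(
#       "one day I will see the world",
#       candidate_labels=["travel", "cooking", "dancing"],
#   )
#   # result["labels"] is sorted by descending score, e.g. "travel" first.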
| 448
|
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image, using the mean pixel value as the threshold."""
    width, height = image.size
    pixels = image.load()
    mean = 0
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
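# Worked example (illustrative): a 2x2 grayscale image with pixel values
# [[10, 200], [30, 220]] has mean (10 + 200 + 30 + 220) // 4 = 115, so
# thresholding maps it to [[0, 255], [0, 255]].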
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 502
| 0
|
'''simple docstring'''
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__lowerCamelCase : Any = open # noqa: we just need to have a builtin inside this module to test it properly
| 700
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
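# Illustrative only: instantiating the deprecated class above emits a FutureWarning;
# new code should construct the replacement directly, e.g.
#
#   from transformers import CLIPImageProcessor
#
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")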
| 459
| 0
|
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline COVID-19 counters from worldometers.info."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 6
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Dict , _snake_case : Any , _snake_case : List[str]=13 , _snake_case : Optional[Any]=7 , _snake_case : Tuple=True , _snake_case : Optional[int]=False , _snake_case : str=99 , _snake_case : Optional[Any]=16 , _snake_case : Optional[int]=2 , _snake_case : Union[str, Any]=4 , _snake_case : int=4 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Tuple=20 , _snake_case : str=2 , _snake_case : str=1 , _snake_case : str=0 , _snake_case : Tuple=16 , _snake_case : List[Any]=16 , )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = parent
__lowerCAmelCase : Optional[Any] = batch_size
__lowerCAmelCase : int = seq_length
__lowerCAmelCase : Tuple = is_training
__lowerCAmelCase : List[Any] = use_labels
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[str] = num_hidden_layers
__lowerCAmelCase : List[Any] = num_attention_heads
__lowerCAmelCase : Any = intermediate_size
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : List[Any] = attention_probs_dropout_prob
__lowerCAmelCase : Union[str, Any] = max_position_embeddings
__lowerCAmelCase : List[str] = eos_token_id
__lowerCAmelCase : Optional[Any] = pad_token_id
__lowerCAmelCase : Optional[Any] = bos_token_id
__lowerCAmelCase : Optional[int] = embed_dim
__lowerCAmelCase : List[str] = word_embed_proj_dim
__lowerCAmelCase : Dict = False
def UpperCAmelCase__ ( self : Tuple )->str:
'''simple docstring'''
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCAmelCase : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCAmelCase : Any = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCAmelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_snake_case , **self.config_updates , )
__lowerCAmelCase : str = prepare_opt_inputs_dict(_snake_case , _snake_case )
return config, inputs_dict
def UpperCAmelCase__ ( self : str , _snake_case : List[str] , _snake_case : Tuple )->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = TFOPTModel(config=_snake_case )
__lowerCAmelCase : Union[str, Any] = inputs_dict["""input_ids"""]
__lowerCAmelCase : Optional[int] = input_ids[:1, :]
__lowerCAmelCase : List[str] = inputs_dict["""attention_mask"""][:1, :]
__lowerCAmelCase : Optional[Any] = 1
# first forward pass
__lowerCAmelCase : List[Any] = model(_snake_case , attention_mask=_snake_case , use_cache=_snake_case )
__lowerCAmelCase , __lowerCAmelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowerCAmelCase : Dict = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCAmelCase : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCAmelCase : Dict = model(_snake_case , attention_mask=_snake_case )[0]
__lowerCAmelCase : Dict = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCAmelCase : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx]
__lowerCAmelCase : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
def UpperCAmelCase__ ( self : List[Any] )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Any = TFOPTModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_snake_case )
def UpperCAmelCase__ ( self : Union[str, Any] )->str:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[Any] )->Dict:
'''simple docstring'''
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
def UpperCAmelCase__ ( self : List[Any] )->Any:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_snake_case : List[str] , _snake_case : Any ):
if hasattr(_snake_case , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_snake_case , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowerCAmelCase : Union[str, Any] = model_class(config=_snake_case )
__lowerCAmelCase : str = _get_word_embedding_weight(_snake_case , model.get_input_embeddings() )
__lowerCAmelCase : Optional[Any] = _get_word_embedding_weight(_snake_case , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_snake_case )
__lowerCAmelCase : Dict = _get_word_embedding_weight(_snake_case , model.get_input_embeddings() )
__lowerCAmelCase : Optional[int] = _get_word_embedding_weight(_snake_case , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCAmelCase : int = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _snake_case )
# check that weights remain the same after resizing
__lowerCAmelCase : Dict = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase : Dict = False
self.assertTrue(_snake_case )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _snake_case )
__lowerCAmelCase : Dict = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase : Any = False
self.assertTrue(_snake_case )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99
def UpperCAmelCase__ ( self : Optional[Any] )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : str = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCAmelCase : Optional[int] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCAmelCase : Optional[Any] = input_ids.shape[0]
__lowerCAmelCase : str = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
@slow
def UpperCAmelCase__ ( self : List[Any] )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
__lowerCAmelCase : int = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__lowerCAmelCase : str = tf.not_equal(_snake_case , model.config.pad_token_id )
with tf.GradientTape():
__lowerCAmelCase : List[str] = model(input_ids=_snake_case , attention_mask=_snake_case ).last_hidden_state
__lowerCAmelCase : Union[str, Any] = (1, 11, 512)
self.assertEqual(output.shape , _snake_case )
__lowerCAmelCase : Union[str, Any] = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _snake_case , atol=4E-3 ) )
__lowerCAmelCase : str = tf.function(_snake_case , jit_compile=_snake_case )
__lowerCAmelCase : List[str] = xla_generate(_snake_case , _snake_case )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _snake_case , atol=4E-2 ) )
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
def UpperCAmelCase__ ( self : Tuple )->Dict:
'''simple docstring'''
super().setUp()
__lowerCAmelCase : Optional[int] = """facebook/opt-350m"""
def UpperCAmelCase__ ( self : Optional[int] )->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCAmelCase : Optional[int] = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCAmelCase : Tuple = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCAmelCase : Tuple = tokenizer(_snake_case , return_tensors="""tf""" , padding=_snake_case , add_special_tokens=_snake_case )
__lowerCAmelCase : Tuple = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCAmelCase : Optional[Any] = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-4 ) )
__lowerCAmelCase : Tuple = tf.function(_snake_case , jit_compile=_snake_case )
__lowerCAmelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-4 ) )
@require_tf
@slow
class TFOPTGenerationIntegrationTests(unittest.TestCase):
@property
def UpperCAmelCase__ ( self : List[Any] )->Dict:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCAmelCase__ ( self : Tuple )->Any:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = """facebook/opt-125m"""
__lowerCAmelCase : Tuple = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__lowerCAmelCase : int = []
__lowerCAmelCase : Any = GPTaTokenizer.from_pretrained(_snake_case )
__lowerCAmelCase : List[str] = TFOPTForCausalLM.from_pretrained(_snake_case )
for prompt in self.prompts:
__lowerCAmelCase : Union[str, Any] = tokenizer(_snake_case , return_tensors="""tf""" ).input_ids
__lowerCAmelCase : Tuple = model.generate(_snake_case , max_length=10 )
__lowerCAmelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
predicted_outputs += generated_string
self.assertListEqual(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Optional[int] )->Dict:
'''simple docstring'''
__lowerCAmelCase : Dict = """facebook/opt-350m"""
__lowerCAmelCase : Any = GPTaTokenizer.from_pretrained(_snake_case )
__lowerCAmelCase : Dict = TFOPTForCausalLM.from_pretrained(_snake_case )
__lowerCAmelCase : int = """left"""
# use different length sentences to test batching
__lowerCAmelCase : Any = [
"""Hello, my dog is a little""",
"""Today, I""",
]
__lowerCAmelCase : Tuple = tokenizer(_snake_case , return_tensors="""tf""" , padding=_snake_case )
__lowerCAmelCase : int = inputs["""input_ids"""]
__lowerCAmelCase : Union[str, Any] = model.generate(input_ids=_snake_case , attention_mask=inputs["""attention_mask"""] )
__lowerCAmelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
__lowerCAmelCase : Tuple = model.generate(input_ids=_snake_case )
__lowerCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
__lowerCAmelCase : List[str] = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
__lowerCAmelCase : Union[str, Any] = model.generate(input_ids=_snake_case , max_length=model.config.max_length - num_paddings )
__lowerCAmelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__lowerCAmelCase : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_snake_case )
__lowerCAmelCase : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=_snake_case )
__lowerCAmelCase : Union[str, Any] = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , [non_padded_sentence, padded_sentence] )
def UpperCAmelCase__ ( self : Dict )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = """facebook/opt-350m"""
__lowerCAmelCase : str = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Optional[int] = GPTaTokenizer.from_pretrained(_snake_case )
__lowerCAmelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(_snake_case )
for prompt in self.prompts:
__lowerCAmelCase : Dict = tokenizer(_snake_case , return_tensors="""tf""" ).input_ids
__lowerCAmelCase : Union[str, Any] = model.generate(_snake_case , max_length=10 )
__lowerCAmelCase : int = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
predicted_outputs += generated_string
self.assertListEqual(_snake_case , _snake_case )
| 504
| 0
|
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 569
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy (fractional) knapsack: maximize profit for a given weight capacity."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach the max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 === weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
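# Worked example (illustrative): calc_profit([60, 100, 120], [10, 20, 30], 50)
# takes the 10kg and 20kg items whole (profit 160) plus 20/30 of the 30kg item
# (profit 80), returning 240.0.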
if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
| 569
| 1
|
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3, or 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
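# Worked example (illustrative): the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
# so ugly_numbers(10) == 12.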
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
| 250
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__a : Any = 7
__a : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
__a : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
__a : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a : Any = 1
# Mimic fairseq token-to-id alignment for the first 4 token
__a : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__a : List[str] = len(self.sp_model )
__a : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
__a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ):
'''simple docstring'''
__a : Tuple = self.__dict__.copy()
__a : List[str] = None
__a : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Dict = {}
__a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__a : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
'''simple docstring'''
__a : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
__a : Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 47
| 0
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
@staticmethod
def __a ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Union[str, Any]:
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
a : List[str] = "CPUExecutionProvider"
return ort.InferenceSession(lowerCAmelCase__ , providers=[provider] , sess_options=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> int:
a : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
a : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name )
a : List[str] = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
a : str = self.model_save_dir.joinpath(lowerCAmelCase__ )
if src_path.exists():
a : Any = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
def __a ( self , lowerCAmelCase__ , **lowerCAmelCase__ , ) -> str:
if os.path.isfile(lowerCAmelCase__ ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# saving model weights/files
self._save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]:
a : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCAmelCase__ ):
a : Tuple = OnnxRuntimeModel.load_model(
os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
a : Tuple = Path(lowerCAmelCase__ )
# load model from hub
else:
# download model
a : Optional[Any] = hf_hub_download(
repo_id=lowerCAmelCase__ , filename=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , )
a : Optional[int] = Path(lowerCAmelCase__ ).parent
a : List[Any] = Path(lowerCAmelCase__ ).name
a : int = OnnxRuntimeModel.load_model(lowerCAmelCase__ , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
return cls(model=lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]:
a : Any = None
if len(str(lowerCAmelCase__ ).split("@" ) ) == 2:
a, a : Tuple = model_id.split("@" )
return cls._from_pretrained(
model_id=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
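# A minimal usage sketch (illustrative only; the repo id, file name, and input
# name below are assumptions -- the valid input names depend on the exported graph):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", file_name="model.onnx")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))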
| 31
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=400 , lowerCAmelCase__=2000 , lowerCAmelCase__=1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1_6000 , lowerCAmelCase__=True , lowerCAmelCase__=80 , lowerCAmelCase__=16 , lowerCAmelCase__=64 , lowerCAmelCase__="hann_window" , lowerCAmelCase__=80 , lowerCAmelCase__=7600 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , ) -> Optional[Any]:
a : int = parent
a : Tuple = batch_size
a : Dict = min_seq_length
a : Any = max_seq_length
a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a : Union[str, Any] = feature_size
a : Tuple = padding_value
a : str = sampling_rate
a : Dict = do_normalize
a : str = num_mel_bins
a : List[str] = hop_length
a : str = win_length
a : Optional[Any] = win_function
a : List[str] = fmin
a : Any = fmax
a : Optional[int] = mel_floor
a : Tuple = return_attention_mask
def __a ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Tuple:
def _flatten(lowerCAmelCase__ ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
a : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Any = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Dict:
if equal_length:
a : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a : Any = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Optional[int] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor
def __a ( self ) -> Union[str, Any]:
a : Tuple = SpeechTaFeatureExtractionTester(self )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def __a ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Any = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
a : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> Optional[Any]:
a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = ["longest", "max_length", "do_not_pad"]
a : Tuple = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Dict = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> str:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : List[str] = range(800 , 1400 , 200 )
a : List[str] = [floats_list((1, x) )[0] for x in lengths]
a : Any = ["longest", "max_length", "do_not_pad"]
a : Any = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : List[Any] = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Dict:
a : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Union[str, Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="max_length" , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self ) -> Dict:
a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : List[Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="longest" , return_tensors="np" )
a : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding="longest" , return_tensors="np" )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> List[str]:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Any = np.random.rand(100 ).astype(np.floataa )
a : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Tuple = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
a : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a : Dict = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
a : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : Any = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a : List[Any] = np.asarray(lowerCAmelCase__ )
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> str:
a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Any = self.feature_extraction_class(**self.feat_extract_dict )
a : Union[str, Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) )
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Optional[Any]:
a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
a : Optional[Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
a : Tuple = feat_extract.num_mel_bins # hack!
a : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )[input_name]
a : Any = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __a ( self ) -> Union[str, Any]:
a : Any = self.feat_extract_dict
a : Optional[Any] = True
a : Union[str, Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : Any = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : int = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} )
a : Union[str, Any] = feat_extract.num_mel_bins # hack!
a : Dict = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Tuple = self.feat_extract_dict
a : str = True
a : Optional[Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : Optional[Any] = feat_extract.model_input_names[0]
a : str = BatchFeature({input_name: speech_inputs} )
a : Optional[Any] = min(lowerCAmelCase__ )
a : List[Any] = feat_extract.num_mel_bins # hack!
a : Any = feat_extract.pad(
lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
from datasets import load_dataset
a : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a : Optional[Any] = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : List[Any] = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
a : List[str] = self._load_datasamples(1 )
a : Union[str, Any] = SpeechTaFeatureExtractor()
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1E-6 ) )
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : Tuple = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
a : Dict = self._load_datasamples(1 )
a : Tuple = SpeechTaFeatureExtractor()
a : Optional[int] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) )
| 31
| 1
|
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = IMAGENET_DEFAULT_MEAN , _UpperCAmelCase = IMAGENET_DEFAULT_STD , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = size if size is not None else {'''shortest_edge''': 224}
__a : str = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__a : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__a : Tuple = get_size_dict(_UpperCAmelCase , param_name='''crop_size''' )
__a : Tuple = do_resize
__a : Optional[int] = size
__a : List[Any] = resample
__a : List[str] = do_center_crop
__a : Dict = crop_size
__a : Union[str, Any] = do_rescale
__a : int = rescale_factor
__a : int = do_normalize
__a : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__a : str = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a : str = int((256 / 224) * size['''shortest_edge'''] )
__a : Tuple = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__a : Tuple = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_UpperCAmelCase , size=(size_dict['''height'''], size_dict['''width''']) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__a : List[Any] = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
__a : str = do_resize if do_resize is not None else self.do_resize
__a : List[str] = resample if resample is not None else self.resample
__a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__a : Any = do_rescale if do_rescale is not None else self.do_rescale
__a : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
__a : str = do_normalize if do_normalize is not None else self.do_normalize
__a : List[str] = image_mean if image_mean is not None else self.image_mean
__a : List[Any] = image_std if image_std is not None else self.image_std
__a : Optional[Any] = size if size is not None else self.size
__a : Optional[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__a : Optional[int] = crop_size if crop_size is not None else self.crop_size
__a : int = get_size_dict(_UpperCAmelCase , param_name='''crop_size''' )
__a : int = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__a : Union[str, Any] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__a : Union[str, Any] = [self.resize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_center_crop:
__a : str = [self.center_crop(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_rescale:
__a : Optional[int] = [self.rescale(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_normalize:
__a : Union[str, Any] = [self.normalize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for image in images]
__a : Tuple = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__a : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
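# Usage sketch for the preprocessing pipeline above. Hedged: this assumes the
# fragment belongs to a Hugging Face image processor such as LevitImageProcessor;
# the checkpoint name and the all-zeros image are purely illustrative.
import numpy as np
from PIL import Image
from transformers import LevitImageProcessor

processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # e.g. (1, 3, 224, 224) after resize + center crop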
| 52
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs)
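# Minimal usage sketch for the configuration above. Hedged: assumes it ships as
# transformers.TrOCRConfig together with TrOCRForCausalLM; the sizes are toy values.
from transformers import TrOCRConfig, TrOCRForCausalLM

config = TrOCRConfig(d_model=256, decoder_layers=2, decoder_attention_heads=4)
model = TrOCRForCausalLM(config)
print(model.config.hidden_size)  # 256, resolved through attribute_map -> d_model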
| 52
| 1
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
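# Sketch of the configuration above in use. Hedged: assumes the class is exposed
# as transformers.ConditionalDetrConfig; the values are illustrative.
from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig(num_queries=50)
print(config.hidden_size, config.num_attention_heads)  # 256 8, via the property aliases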
| 281
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False})
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("Does not support attention outputs" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip
def snake_case ( self : Tuple ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def snake_case ( self : Optional[int] ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def snake_case ( self : List[str] ):
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def snake_case ( self : int ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : Any ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : List[str] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : List[str] ):
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold only has one output format." )
def snake_case ( self : Tuple ):
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def snake_case ( self : Any ):
pass
@unittest.skip("ESMFold does not support input chunking." )
def snake_case ( self : str ):
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def snake_case ( self : Any ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def snake_case ( self : Any ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def snake_case ( self : Tuple ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def snake_case ( self : List[str] ):
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self : Any ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 281
| 1
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}")
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
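# Programmatic equivalent of the CLI entry point above. Hedged: the checkpoint name
# and output directory are illustrative, and running it downloads the fairseq
# weights via torch.hub:
# convert_bart_checkpoint("bart.large.cnn", "./bart-large-cnn", hf_checkpoint_name="facebook/bart-large-cnn")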
| 320
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
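# Small self-test for the open-addressing map above (not part of the original module):
if __name__ == "__main__":
    hm = HashMap(initial_block_size=4)
    for i in range(10):
        hm[f"key{i}"] = i  # triggers several _size_up() resizes along the way
    del hm["key3"]  # tombstones the slot with _deleted
    assert "key3" not in hm and hm["key5"] == 5
    print(len(hm), sorted(hm))  # 9 ['key0', 'key1', ...]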
| 135
| 0
|
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
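# Usage sketch; fire exposes both positional and flag syntax (paths illustrative):
#   python minify.py data/full data/mini 100
# or programmatically:
#   minify("data/full", "data/mini", 100)  # keep the first 100 lines of every file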
| 714
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"),
                batched=True)
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding="max_length"),
                batched=True)

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # int32 token inputs, int64 labels (assumed dtypes; the originals were garbled)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO)
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length)
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir)
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir)

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics)
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
    return results
if __name__ == "__main__":
main()
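# Illustrative invocation (file names and the label column index are made up):
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#     --output_dir ./model --do_train --do_eval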
| 294
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
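# Composition sketch: the text config nests the vision config. Hedged: assumes the
# classes are exposed as transformers.GitConfig / GitVisionConfig.
vision_config = GitVisionConfig(image_size=384)
config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=4)
print(config.vision_config.image_size)  # 384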
| 467
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        from .features import Value
        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary())
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string())
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
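# Round-trip sketch for the feature above. Hedged: requires soundfile (and librosa
# for decoding); the sine wave stands in for real speech.
import numpy as np

audio_feature = Audio(sampling_rate=16_000)
sample = {"array": np.sin(np.linspace(0, 100, 16_000)).astype(np.float32), "sampling_rate": 16_000}
encoded = audio_feature.encode_example(sample)  # -> {"bytes": b"RIFF...", "path": None}
decoded = audio_feature.decode_example(encoded)  # -> {"path": None, "array": ..., "sampling_rate": 16000}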
| 467
| 1
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
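# Sketch of plugging the scheduler under test into a diffusers pipeline. Hedged:
# the checkpoint is only an example and sampling needs the torchsde extra installed.
from diffusers import DiffusionPipeline, DPMSolverSDEScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]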
| 718
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Any, lowercase: Union[str, Any] ) -> Tuple:
A : Tuple =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : Optional[Any] =features.copy() if features else default_expected_features
A : Union[str, Any] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : str =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def A__ ( lowercase: Optional[int], lowercase: str, lowercase: Dict ) -> Optional[int]:
A : int =tmp_path / 'cache'
A : Tuple ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A : int =features.copy() if features else default_expected_features
A : str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( lowercase: Optional[Any], lowercase: str ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : str ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A : Dict =features.copy()
A : List[str] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int =tmp_path / 'cache'
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Union[str, Any], lowercase: Any, lowercase: str ) -> Optional[Any]:
A : Optional[int] =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =JsonDatasetReader(lowercase, cache_dir=lowercase, split=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Union[str, Any] ) -> List[Any]:
if issubclass(lowercase, lowercase ):
A : int =jsonl_path
elif issubclass(lowercase, lowercase ):
A : Any =[jsonl_path]
A : Optional[Any] =tmp_path / 'cache'
A : Tuple ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[str] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
def A__ ( lowercase: List[str], lowercase: Tuple, lowercase: Optional[Any]=("train",) ) -> Tuple:
assert isinstance(lowercase, lowercase )
for split in splits:
A : List[str] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
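
# Writer tests below: serialize a Dataset to JSON (either JSON lines or one of
# the pandas-style orients) and check the shape and length of the export.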
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
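
# Illustrative round-trip sketch (not part of the original test suite): write a
# Dataset to in-memory JSON lines, then parse it back.
#
#   with io.BytesIO() as buf:
#       JsonDatasetWriter(dataset, buf, lines=True).write()
#       buf.seek(0)
#       rows = [json.loads(line) for line in buf]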
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a `RetriBertModel`.
    """

    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
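
# Minimal usage sketch: the defaults mirror the signature above.
# config = RetriBertConfig()
# assert config.projection_dim == 128 and config.num_hidden_layers == 8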
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """
    Return True if the given tree is a binary search tree.

    >>> is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
    True
    >>> is_binary_search_tree(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))
    False
    """
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        # Every node must lie strictly inside the (left_bound, right_bound) window.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
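
    # Minimal sketch: validate a hand-built three-node tree.
    example_root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    print(is_binary_search_tree(example_root))  # True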
"""simple docstring"""
def join(separator: str, separated: list[str]) -> str:
    """
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
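
    # Minimal usage sketch:
    print(join("-", ["a", "b", "c"]))  # -> a-b-c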
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
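
    # Each scheduler test below reloads the same ONNX pipeline, swaps in a
    # different scheduler, and compares a 3x3 corner slice of the upscaled
    # output against reference values with a loose tolerance.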
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
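
    # The equivalence helpers below run identical inputs through the PyTorch
    # and Flax models (converting weights in both directions) and assert that
    # the leading outputs agree within a small tolerance.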
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_1(**inputs)
        out_1 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
"""simple docstring"""
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark end of a word with the sentinel

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
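
# Example: with the words above, autocomplete_using_trie("de") returns
# ("depart ", "detergent ", "deer ", "deal ") - each completion carries a
# trailing space because the end-of-word sentinel maps to " ".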
"""simple docstring"""
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def roman_to_int(roman: str) -> int:
    """
    >>> roman_to_int("XLII")
    42
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one means subtractive notation (e.g. IV).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """
    >>> int_to_roman(42)
    'XLII'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
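
    # Round-trip sketch: 3999 -> "MMMCMXCIX" -> 3999
    assert int_to_roman(3999) == "MMMCMXCIX"
    assert roman_to_int("MMMCMXCIX") == 3999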
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
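
    # The next test (name assumed) round-trips the generated past_key_values
    # cache through Falcon's legacy RW cache format and back, checking that
    # shapes and values are preserved.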
    def test_falcon_cache_conversion(self):  # hypothetical name: exercises the RW cache round-trip helpers
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(result.past_key_values)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)
            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function elementwise.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    # Extract the kernel_size x kernel_size window centered on (x, y).
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Build the spatial gaussian kernel from each cell's distance to the center.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
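
# For example (hypothetical CLI call):
#   parse_args(["bilateral_filter.py", "img.jpg", "2.0", "2.0", "4"])
#   -> ("img.jpg", 2.0, 2.0, 5)   # even kernel sizes are bumped to odd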
if __name__ == "__main__":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = parse_args(sys.argv)
SCREAMING_SNAKE_CASE : Dict = cva.imread(filename, 0)
cva.imshow("""input image""", img)
SCREAMING_SNAKE_CASE : str = img / 255
SCREAMING_SNAKE_CASE : str = out.astype("""float32""")
SCREAMING_SNAKE_CASE : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
SCREAMING_SNAKE_CASE : Tuple = out * 255
SCREAMING_SNAKE_CASE : Dict = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP client socket
    host = socket.gethostname()
    port = 12_312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
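
# Note: this client assumes a matching file-transfer server is already
# listening on the same host and port before it is run.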
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a `UMT5Model`.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
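
# Minimal usage sketch (assuming the defaults above):
# config = UMT5Config()
# assert config.hidden_size == config.d_model == 512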
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
import os
import numpy
import onnx
def UpperCamelCase ( snake_case__ : int ,snake_case__ : str ):
    '''Check whether two TensorProtos are equal, ignoring their names.'''
__snake_case :Dict = a.name
__snake_case :List[Any] = b.name
__snake_case :List[Any] = """"""
__snake_case :List[Any] = """"""
__snake_case :Any = a == b
__snake_case :List[Any] = name_a
__snake_case :List[str] = name_b
return res
def UpperCamelCase ( snake_case__ : Optional[int] ,snake_case__ : int ,snake_case__ : int ):
    '''Replace an input name on a node, recursing into If/Loop subgraphs.'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(snake_case__ ,snake_case__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g ,snake_case__ ,snake_case__ )
_graph_replace_input_with(node_proto.attribute[1].g ,snake_case__ ,snake_case__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g ,snake_case__ ,snake_case__ )
def UpperCamelCase ( snake_case__ : List[Any] ,snake_case__ : Optional[int] ,snake_case__ : str ):
    '''Replace an input name across every node of a graph.'''
for n in graph_proto.node:
_node_replace_input_with(snake_case__ ,snake_case__ ,snake_case__ )
def UpperCamelCase ( snake_case__ : int ,snake_case__ : Optional[int] ,snake_case__ : Tuple ):
    '''Drop the duplicated initializers and rewire nodes to the kept copies.'''
__snake_case :Union[str, Any] = list(model.graph.initializer )
__snake_case :str = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__snake_case :Union[str, Any] = inits[i].name
__snake_case :Any = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph ,snake_case__ ,snake_case__ )
def UpperCamelCase ( snake_case__ : Union[str, Any] ):
    '''Deduplicate identical initializers in an ONNX model and save the optimized copy.'''
__snake_case :Union[str, Any] = os.path.dirname(snake_case__ )
__snake_case :Dict = os.path.basename(snake_case__ )
__snake_case :int = onnx.load(os.path.join(snake_case__ ,snake_case__ ) )
__snake_case :Optional[Any] = list(model.graph.initializer )
__snake_case :int = set()
__snake_case :Tuple = {}
__snake_case :str = []
__snake_case :Optional[Any] = 0
for i in range(len(snake_case__ ) ):
if i in dup_set:
continue
for j in range(i + 1 ,len(snake_case__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] ,inits[j] ):
dup_set.add(snake_case__ )
dup_set.add(snake_case__ )
__snake_case :Any = inits[j].data_type
__snake_case :str = numpy.prod(inits[j].dims )
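                # ONNX TensorProto dtype codes: 1 = FLOAT and 6 = INT32 (4 bytes each); 7 = INT64 and 11 = DOUBLE (8 bytes each).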
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ ,snake_case__ )
total_reduced_size += mem_size
__snake_case :Dict = inits[i].name
__snake_case :Optional[int] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(snake_case__ )
else:
__snake_case :Dict = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ ,total_reduced_size / 1024 / 1024 / 1024 ,"""GB""" )
__snake_case :Dict = sorted(snake_case__ )
_remove_dup_initializers_from_model(snake_case__ ,snake_case__ ,snake_case__ )
__snake_case :Any = """optimized_""" + model_file_name
__snake_case :List[Any] = os.path.join(snake_case__ ,snake_case__ )
onnx.save(snake_case__ ,snake_case__ )
return new_model
def UpperCamelCase ( snake_case__ : float ,snake_case__ : int ):
    '''Isolate the decimal part of a number, optionally rounded to digit_amount places.'''
if digit_amount > 0:
return round(number - int(snake_case__ ) ,snake_case__ )
return number - int(snake_case__ )
if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Fast pipeline tests for AltDiffusion.'''
A__ : str = AltDiffusionPipeline
A__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
A__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
A__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
A__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def A__ ( self: int ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
UpperCAmelCase_ : Dict = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=_a ,set_alpha_to_one=_a ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCAmelCase_ : str = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5002 ,)
UpperCAmelCase_ : int = CLIPTextModel(_a )
UpperCAmelCase_ : Tuple = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
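        # AltDiffusion pairs a multilingual, XLM-Roberta-based text encoder with the usual Stable Diffusion components, hence the XLM-R tokenizer.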
UpperCAmelCase_ : Union[str, Any] = 77
UpperCAmelCase_ : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A__ ( self: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=0 ) -> Any:
if str(_a ).startswith("""mps""" ):
UpperCAmelCase_ : Dict = torch.manual_seed(_a )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_a ).manual_seed(_a )
UpperCAmelCase_ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def A__ ( self: Any ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A__ ( self: List[Any] ) -> Any:
UpperCAmelCase_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Tuple = self.get_dummy_components()
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase_ : Union[str, Any] = RobertaSeriesModelWithTransformation(_a )
UpperCAmelCase_ : Optional[int] = text_encoder
UpperCAmelCase_ : str = AltDiffusionPipeline(**_a )
UpperCAmelCase_ : Any = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_a )
UpperCAmelCase_ : int = """A photo of an astronaut"""
UpperCAmelCase_ : int = alt_pipe(**_a )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : List[Any] = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
UpperCAmelCase_ : str = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase_ : List[str] = RobertaSeriesModelWithTransformation(_a )
UpperCAmelCase_ : List[Any] = text_encoder
UpperCAmelCase_ : Any = AltDiffusionPipeline(**_a )
UpperCAmelCase_ : Optional[Any] = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_a )
UpperCAmelCase_ : Tuple = alt_pipe(**_a )
UpperCAmelCase_ : int = output.images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Any = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    '''Slow integration tests for AltDiffusion (GPU required).'''
def A__ ( self: Optional[Any] ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[int] = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,safety_checker=_a )
UpperCAmelCase_ : Any = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : Dict = alt_pipe([prompt] ,generator=_a ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type="""np""" )
UpperCAmelCase_ : Optional[Any] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ : List[str] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> str:
UpperCAmelCase_ : Union[str, Any] = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" ,subfolder="""scheduler""" )
UpperCAmelCase_ : str = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,scheduler=_a ,safety_checker=_a )
UpperCAmelCase_ : Dict = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
UpperCAmelCase_ : List[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : int = alt_pipe([prompt] ,generator=_a ,num_inference_steps=2 ,output_type="""numpy""" )
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ : Dict = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
UpperCamelCase_ = 50000
UpperCamelCase_ = 5000
UpperCamelCase_ ,UpperCamelCase_ = os.path.split(__file__)
UpperCamelCase_ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
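# @get_duration (from the benchmark utils) times each call; the functions below exercise different dataset access patterns.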
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : int ):
    '''Read examples one at a time.'''
for i in range(_a ):
UpperCAmelCase_ : List[str] = dataset[i]
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : Union[str, Any] , _a : int ):
    '''Read examples in contiguous batches.'''
for i in range(0 , len(_a ) , _a ):
UpperCAmelCase_ : Any = dataset[i : i + batch_size]
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : Union[str, Any] , _a : str ):
    '''Read single examples with the given output format.'''
with dataset.formatted_as(type=_a ):
for i in range(_a ):
UpperCAmelCase_ : int = dataset[i]
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : Optional[Any] , _a : Tuple , _a : Optional[Any] ):
    '''Read batches with the given output format.'''
with dataset.formatted_as(type=_a ):
for i in range(0 , _a , _a ):
UpperCAmelCase_ : Any = dataset[i : i + batch_size]
def lowerCamelCase_ ( ):
    '''Benchmark several dataset read patterns and dump the timings to JSON.'''
UpperCAmelCase_ : List[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase_ : List[str] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
UpperCAmelCase_ : List[Any] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
UpperCAmelCase_ : Optional[Any] = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
UpperCAmelCase_ : Optional[Any] = generate_example_dataset(
os.path.join(_a , """dataset.arrow""" ) , _a , num_examples=_a , seq_shapes={"""list""": (100,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(_a ) )
UpperCAmelCase_ : str = func(_a , **_a )
print("""shuffling dataset""" )
UpperCAmelCase_ : int = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(_a ) )
UpperCAmelCase_ : Any = func(
_a , **_a )
with open(_a , """wb""" ) as f:
f.write(json.dumps(_a ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
'''Principal Component Analysis and Linear Discriminant Analysis with NumPy/SciPy.'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def UpperCamelCase_ ( A__ : np.ndarray ):
    '''Reshape a 1-D array into a column vector.'''
return input_array.reshape((input_array.size, 1) )
def UpperCamelCase_ ( A__ : np.ndarray , A__ : np.ndarray , A__ : int ):
    '''Within-class covariance (scatter) of the labelled features.'''
lowerCAmelCase_ : str = np.nan
for i in range(A__ ):
lowerCAmelCase_ : Optional[int] = features[:, labels == i]
lowerCAmelCase_ : Optional[int] = data.mean(1 )
# Centralize the data of class i
lowerCAmelCase_ : Tuple = data - column_reshape(A__ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(A__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase_ : List[str] = np.dot(A__ , centered_data.T )
return covariance_sum / features.shape[1]
def UpperCamelCase_ ( A__ : np.ndarray , A__ : np.ndarray , A__ : int ):
    '''Between-class covariance (scatter) of the labelled features.'''
lowerCAmelCase_ : int = features.mean(1 )
lowerCAmelCase_ : Optional[int] = np.nan
for i in range(A__ ):
lowerCAmelCase_ : Tuple = features[:, labels == i]
lowerCAmelCase_ : Union[str, Any] = data.shape[1]
lowerCAmelCase_ : List[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) , (column_reshape(A__ ) - column_reshape(A__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase_ : Tuple = device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) , (column_reshape(A__ ) - column_reshape(A__ )).T , )
return covariance_sum / features.shape[1]
def UpperCamelCase_ ( A__ : np.ndarray , A__ : int ):
    '''Project the features onto their top principal components.'''
if features.any():
lowerCAmelCase_ : Dict = features.mean(1 )
# Center the dataset
lowerCAmelCase_ : str = features - np.reshape(A__ , (data_mean.size, 1) )
lowerCAmelCase_ : int = np.dot(A__ , centered_data.T ) / features.shape[1]
lowerCAmelCase_, lowerCAmelCase_ : Optional[int] = np.linalg.eigh(A__ )
        # Take the eigenvector columns in reverse order (largest eigenvalue first) and keep only the first `dimensions` of them
lowerCAmelCase_ : Dict = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowerCAmelCase_ : int = np.dot(filtered_eigenvectors.T , A__ )
logging.info("""Principal Component Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=A__ )
logging.error("""Dataset empty""" )
raise AssertionError
def UpperCamelCase_ ( A__ : np.ndarray , A__ : np.ndarray , A__ : int , A__ : int ):
    '''Project the features onto the top linear discriminants.'''
assert classes > dimensions
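    # LDA can produce at most (classes - 1) meaningful discriminant directions.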
    # Check that the features have actually been loaded
    if features.any():
lowerCAmelCase_, lowerCAmelCase_ : Optional[int] = eigh(
covariance_between_classes(A__ , A__ , A__ ) , covariance_within_classes(A__ , A__ , A__ ) , )
lowerCAmelCase_ : Tuple = eigenvectors[:, ::-1][:, :dimensions]
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : List[Any] = np.linalg.svd(A__ )
lowerCAmelCase_ : Optional[Any] = svd_matrix[:, 0:dimensions]
lowerCAmelCase_ : str = np.dot(filtered_svd_matrix.T , A__ )
logging.info("""Linear Discriminant Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=A__ )
logging.error("""Dataset empty""" )
raise AssertionError
def UpperCamelCase_ ( ):
    '''linear_discriminant_analysis must raise an AssertionError when dimensions > classes.'''
lowerCAmelCase_ : List[str] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowerCAmelCase_ : Optional[Any] = np.array([0, 0, 0, 1, 1] )
lowerCAmelCase_ : Dict = 2
lowerCAmelCase_ : Tuple = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(A__ ) as error_info:
lowerCAmelCase_ : Tuple = linear_discriminant_analysis(
A__ , A__ , A__ , A__ )
if isinstance(A__ , np.ndarray ):
raise AssertionError(
"""Did not raise AssertionError for dimensions > classes""" )
assert error_info.type is AssertionError
def UpperCamelCase_ ( ):
    '''Exercise principal_component_analysis on a small example dataset.'''
lowerCAmelCase_ : List[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowerCAmelCase_ : Optional[Any] = 2
lowerCAmelCase_ : List[str] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(A__ ) as error_info:
lowerCAmelCase_ : Optional[int] = principal_component_analysis(A__ , A__ )
if not np.allclose(A__ , A__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
'''A hash map implemented with open addressing and linear probing.'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__A : Union[str, Any] = TypeVar("KEY")
__A : Union[str, Any] = TypeVar("VAL")
@dataclass(frozen=_SCREAMING_SNAKE_CASE ,slots=_SCREAMING_SNAKE_CASE)
class __snake_case ( Generic[KEY, VAL]):
"""simple docstring"""
lowercase = 42
lowercase = 42
class __snake_case ( _Item):
"""simple docstring"""
def __init__( self : int ) -> None:
super().__init__(lowerCamelCase , lowerCamelCase )
def __bool__( self : Tuple ) -> bool:
return False
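# Tombstone sentinel left in deleted slots so that probe chains stay intact.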
__A : Any = _DeletedItem()
class __snake_case ( MutableMapping[KEY, VAL]):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase : int = 8 , lowerCamelCase : float = 0.75 ) -> None:
lowerCAmelCase_ : str = initial_block_size
lowerCAmelCase_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase_ : Optional[Any] = capacity_factor
lowerCAmelCase_ : List[Any] = 0
def __lowercase ( self : List[Any] , lowerCamelCase : KEY ) -> int:
return hash(lowerCamelCase ) % len(self._buckets )
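    # Linear probing: on collision, step to the next bucket index, wrapping around.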
def __lowercase ( self : Optional[Any] , lowerCamelCase : int ) -> int:
return (ind + 1) % len(self._buckets )
def __lowercase ( self : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : KEY , lowerCamelCase : VAL ) -> bool:
lowerCAmelCase_ : Union[str, Any] = self._buckets[ind]
if not stored:
lowerCAmelCase_ : List[Any] = _Item(lowerCamelCase , lowerCamelCase )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase_ : List[str] = _Item(lowerCamelCase , lowerCamelCase )
return True
else:
return False
def __lowercase ( self : Optional[Any] ) -> bool:
lowerCAmelCase_ : str = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase )
def __lowercase ( self : Dict ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCAmelCase_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __lowercase ( self : Tuple , lowerCamelCase : int ) -> None:
lowerCAmelCase_ : Union[str, Any] = self._buckets
lowerCAmelCase_ : str = [None] * new_size
lowerCAmelCase_ : str = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __lowercase ( self : Tuple ) -> None:
self._resize(len(self._buckets ) * 2 )
def __lowercase ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __lowercase ( self : Optional[int] , lowerCamelCase : KEY ) -> Iterator[int]:
lowerCAmelCase_ : int = self._get_bucket_index(lowerCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase_ : Dict = self._get_next_ind(lowerCamelCase )
def __lowercase ( self : Tuple , lowerCamelCase : KEY , lowerCamelCase : VAL ) -> None:
for ind in self._iterate_buckets(lowerCamelCase ):
if self._try_set(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
break
def __setitem__( self : Tuple , lowerCamelCase : KEY , lowerCamelCase : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase , lowerCamelCase )
def __delitem__( self : Optional[int] , lowerCamelCase : KEY ) -> None:
for ind in self._iterate_buckets(lowerCamelCase ):
lowerCAmelCase_ : int = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase_ : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Any , lowerCamelCase : KEY ) -> VAL:
for ind in self._iterate_buckets(lowerCamelCase ):
lowerCAmelCase_ : Dict = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase )
def __len__( self : Any ) -> int:
return self._len
def __iter__( self : Optional[Any] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[Any] ) -> str:
lowerCAmelCase_ : int = """ ,""".join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __a ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 3.0
class __a ( unittest.TestCase ):
def UpperCamelCase ( self : Optional[Any])-> Optional[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {})
self.assertDictEqual(MockClass(a=2).to_kwargs() , {"""a""": 2})
self.assertDictEqual(MockClass(a=2 , b=snake_case_).to_kwargs() , {"""a""": 2, """b""": True})
self.assertDictEqual(MockClass(a=2 , c=2.2_5).to_kwargs() , {"""a""": 2, """c""": 2.2_5})
@require_cuda
def UpperCamelCase ( self : str)-> Optional[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
__lowerCAmelCase =GradScalerKwargs(init_scale=10_24 , growth_factor=2)
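        # GradScalerKwargs forwards these arguments to the torch.cuda.amp.GradScaler created for mixed precision.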
AcceleratorState._reset_state()
__lowerCAmelCase =Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler])
print(accelerator.use_fpaa)
__lowerCAmelCase =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0)
self.assertEqual(scaler._growth_factor , 2.0)
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5)
self.assertEqual(scaler._growth_interval , 20_00)
self.assertEqual(scaler._enabled , snake_case_)
@require_multi_gpu
def UpperCamelCase ( self : str)-> Optional[Any]:
__lowerCAmelCase =["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
execute_subprocess_async(snake_case_ , env=os.environ.copy())
if __name__ == "__main__":
lowercase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowercase_ = Accelerator(kwargs_handlers=[ddp_scaler])
lowercase_ = torch.nn.Linear(1_00, 2_00)
lowercase_ = accelerator.prepare(model)
# Check the values changed in kwargs
lowercase_ = ''''''
lowercase_ = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
lowercase_ = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
lowercase_ = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
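# Map model outputs from [-1, 1] back to [0, 1], move to CPU channels-last, and convert to PIL images.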
def __lowerCAmelCase ( __lowerCamelCase : List[str] ) -> int:
__lowerCAmelCase =(images / 2 + 0.5).clamp(0 , 1 )
__lowerCAmelCase =images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowerCAmelCase =numpy_to_pil(__lowerCamelCase )
return images
def __lowerCAmelCase ( __lowerCamelCase : str ) -> str:
if images.ndim == 3:
__lowerCAmelCase =images[None, ...]
__lowerCAmelCase =(images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
__lowerCAmelCase =[Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
else:
__lowerCAmelCase =[Image.fromarray(__lowerCamelCase ) for image in images]
return pil_images
def _A (UpperCamelCase : int = 10**9 ) ->int:
    '''Accumulate the perimeters produced by the integer recurrence below until max_perimeter is exceeded.'''
lowerCamelCase__ : Optional[Any] = 1
lowerCamelCase__ : Union[str, Any] = 2
lowerCamelCase__ : Any = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Any = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowerCamelCase__ : Any = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowercase = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __A :
UpperCamelCase :Dict = PegasusConfig
UpperCamelCase :Dict = {}
UpperCamelCase :Union[str, Any] = '''gelu'''
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=False , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=20 , __magic_name__=2 , __magic_name__=1 , __magic_name__=0 , ):
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : str = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : str = is_training
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : int = intermediate_size
lowerCamelCase__ : int = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : int = eos_token_id
lowerCamelCase__ : Tuple = pad_token_id
lowerCamelCase__ : List[str] = bos_token_id
def _snake_case (self ):
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCamelCase__ : Any = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase__ : Any = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCamelCase__ : Dict = prepare_pegasus_inputs_dict(__magic_name__ , __magic_name__ , __magic_name__ )
return config, inputs_dict
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Optional[int] = 20
lowerCamelCase__ : str = model_class_name(__magic_name__ )
lowerCamelCase__ : List[str] = model.encode(inputs_dict["""input_ids"""] )
lowerCamelCase__ ,lowerCamelCase__ : List[Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowerCamelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , __magic_name__ , __magic_name__ )
lowerCamelCase__ : Optional[int] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowerCamelCase__ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
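        # First pass: decode all but the last token to populate the cache, then feed only the final token.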
lowerCamelCase__ : str = model.decode(
decoder_input_ids[:, :-1] , __magic_name__ , decoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , decoder_position_ids=__magic_name__ , )
lowerCamelCase__ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCamelCase__ : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , __magic_name__ , decoder_attention_mask=__magic_name__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__magic_name__ , )
lowerCamelCase__ : Dict = model.decode(__magic_name__ , __magic_name__ )
lowerCamelCase__ : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : List[str] = 20
lowerCamelCase__ : Optional[int] = model_class_name(__magic_name__ )
lowerCamelCase__ : List[Any] = model.encode(inputs_dict["""input_ids"""] )
lowerCamelCase__ ,lowerCamelCase__ : Optional[int] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowerCamelCase__ : str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCamelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , __magic_name__ , __magic_name__ )
lowerCamelCase__ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__ : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , __magic_name__ , decoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , decoder_position_ids=__magic_name__ , )
lowerCamelCase__ : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCamelCase__ : str = model.decode(
decoder_input_ids[:, -1:] , __magic_name__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__magic_name__ , decoder_position_ids=__magic_name__ , )
lowerCamelCase__ : Optional[int] = model.decode(__magic_name__ , __magic_name__ , decoder_attention_mask=__magic_name__ )
lowerCamelCase__ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def _A (UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=None , ) ->Optional[Any]:
'''simple docstring'''
if attention_mask is None:
lowerCamelCase__ : List[Any] = np.not_equal(UpperCamelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowerCamelCase__ : Dict = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __A ( A_ , unittest.TestCase ):
UpperCamelCase :int = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
UpperCamelCase :int = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
UpperCamelCase :Union[str, Any] = True
UpperCamelCase :Optional[int] = False
UpperCamelCase :Optional[Any] = False
UpperCamelCase :List[str] = False
def _snake_case (self ):
lowerCamelCase__ : Dict = FlaxPegasusModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__magic_name__ )
def _snake_case (self ):
self.config_tester.run_common_tests()
def _snake_case (self ):
lowerCamelCase__ ,lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__magic_name__ , __magic_name__ , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ ,lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__magic_name__ , __magic_name__ , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ ,lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : Any = self._prepare_for_class(__magic_name__ , __magic_name__ )
lowerCamelCase__ : Optional[Any] = model_class(__magic_name__ )
@jax.jit
def encode_jitted(__magic_name__ , __magic_name__=None , **__magic_name__ ):
return model.encode(input_ids=__magic_name__ , attention_mask=__magic_name__ )
with self.subTest("""JIT Enabled""" ):
lowerCamelCase__ : str = encode_jitted(**__magic_name__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCamelCase__ : Optional[Any] = encode_jitted(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
for jitted_output, output in zip(__magic_name__ , __magic_name__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case (self ):
lowerCamelCase__ ,lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : Tuple = model_class(__magic_name__ )
lowerCamelCase__ : Any = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
lowerCamelCase__ : str = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__magic_name__ , __magic_name__ , __magic_name__ ):
return model.decode(
decoder_input_ids=__magic_name__ , decoder_attention_mask=__magic_name__ , encoder_outputs=__magic_name__ , )
with self.subTest("""JIT Enabled""" ):
lowerCamelCase__ : int = decode_jitted(**__magic_name__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCamelCase__ : Optional[int] = decode_jitted(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
for jitted_output, output in zip(__magic_name__ , __magic_name__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case (self ):
for model_class_name in self.all_model_classes:
lowerCamelCase__ : Tuple = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__magic_name__ )
lowerCamelCase__ : List[Any] = np.ones((1, 1) )
lowerCamelCase__ : Optional[int] = model(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@slow
def _snake_case (self ):
lowerCamelCase__ : str = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
lowerCamelCase__ : Any = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
lowerCamelCase__ : List[Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowerCamelCase__ : str = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
lowerCamelCase__ : Optional[Any] = tokenizer(__magic_name__ , return_tensors="""np""" , truncation=__magic_name__ , max_length=512 , padding=__magic_name__ )
lowerCamelCase__ : Union[str, Any] = model.generate(**__magic_name__ , num_beams=2 ).sequences
lowerCamelCase__ : List[Any] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
assert tgt_text == decoded
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase :Any = ''
__lowerCAmelCase :str = ''
__lowerCAmelCase :Optional[int] = ''
__lowerCAmelCase :Union[str, Any] = 1 # (0 is vertical, 1 is horizontal)
def A ( ):
_snake_case , _snake_case : int = get_dataset(UpperCAmelCase , UpperCAmelCase )
print("Processing..." )
_snake_case , _snake_case , _snake_case : int = update_image_and_anno(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
for index, image in enumerate(UpperCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_snake_case : Tuple = random_chars(32 )
_snake_case : Union[str, Any] = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
_snake_case : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Success {index+1}/{len(UpperCAmelCase )} with {file_name}""" )
_snake_case : Tuple = []
for anno in new_annos[index]:
_snake_case : str = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCAmelCase )
with open(F"""/{file_root}.txt""" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def A ( UpperCAmelCase , UpperCAmelCase ):
_snake_case : List[Any] = []
_snake_case : Optional[int] = []
for label_file in glob.glob(os.path.join(UpperCAmelCase , "*.txt" ) ):
_snake_case : List[Any] = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(UpperCAmelCase ) as in_file:
_snake_case : List[Any] = in_file.readlines()
_snake_case : List[str] = os.path.join(UpperCAmelCase , F"""{label_name}.jpg""" )
_snake_case : int = []
for obj_list in obj_lists:
_snake_case : Any = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCAmelCase )
labels.append(UpperCAmelCase )
return img_paths, labels
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 ):
_snake_case : Dict = []
_snake_case : Optional[Any] = []
_snake_case : int = []
for idx in range(len(UpperCAmelCase ) ):
_snake_case : int = []
_snake_case : Tuple = img_list[idx]
path_list.append(UpperCAmelCase )
_snake_case : str = anno_list[idx]
_snake_case : str = cva.imread(UpperCAmelCase )
if flip_type == 1:
_snake_case : Dict = cva.flip(UpperCAmelCase , UpperCAmelCase )
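            # Horizontal flip: mirror each YOLO-format box's x-center; width and height are unchanged.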
for bbox in img_annos:
_snake_case : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_snake_case : Any = cva.flip(UpperCAmelCase , UpperCAmelCase )
for bbox in img_annos:
_snake_case : Tuple = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCAmelCase )
new_imgs_list.append(UpperCAmelCase )
return new_imgs_list, new_annos_lists, path_list
def A ( UpperCAmelCase = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
_snake_case : Dict = ascii_lowercase + digits
return "".join(random.choice(UpperCAmelCase ) for _ in range(UpperCAmelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅')
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCAmelCase :List[Any] = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def A ( UpperCAmelCase ):
_snake_case : List[str] = test_results.split(" " )
_snake_case : Optional[int] = 0
_snake_case : int = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_snake_case : Optional[Any] = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(UpperCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
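# Parse the short failure report, mapping each failing doctest to the first line of its error.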
def A ( UpperCAmelCase ):
_snake_case : Union[str, Any] = {}
_snake_case : Any = None
_snake_case : str = False
for line in failures_short_lines.split("\n" ):
if re.search(R"_ \[doctest\]" , UpperCAmelCase ):
_snake_case : Union[str, Any] = True
_snake_case : Tuple = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
_snake_case : Optional[Any] = line
_snake_case : Dict = False
return failures
class _a:
def __init__( self , __snake_case , __snake_case ) -> Tuple:
        '''Store the report title and aggregate counts from the doc-test results.'''
_snake_case : Dict = title
_snake_case : Optional[Any] = doc_test_results["time_spent"].split("," )[0]
_snake_case : Dict = doc_test_results["success"]
_snake_case : Optional[Any] = doc_test_results["failures"]
_snake_case : Tuple = self.n_success + self.n_failures
# Failures and success of the modeling tests
_snake_case : Union[str, Any] = doc_test_results
@property
def lowercase ( self ) -> str:
        '''Total runtime formatted as XhYmZs.'''
_snake_case : Dict = [self._time_spent]
_snake_case : Tuple = 0
for time in time_spent:
_snake_case : str = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__snake_case ) == 1:
_snake_case : List[Any] = [0, 0, time_parts[0]]
_snake_case , _snake_case , _snake_case : int = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        _snake_case , _snake_case , _snake_case : List[str] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"""{int(__snake_case )}h{int(__snake_case )}m{int(__snake_case )}s"""
@property
def lowercase ( self ) -> Dict:
        '''Slack header block for the report.'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowercase ( self ) -> Dict:
        '''Slack block used when every test passed.'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def lowercase ( self ) -> Dict:
        '''Slack block summarizing the failure counts and runtime.'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def lowercase ( self ) -> Dict:
        '''Slack block listing failures grouped by category.'''
_snake_case : List[str] = 4_0
_snake_case : Any = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__snake_case , __snake_case )}
_snake_case : int = ""
for category, failures in category_failures.items():
if len(__snake_case ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__snake_case )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def lowercase ( self ) -> str:
        '''Full Slack payload serialized as JSON.'''
_snake_case : Optional[Any] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__snake_case )
@staticmethod
def lowercase ( ) -> Dict:
        '''Post a generic error message when the test run itself failed.'''
_snake_case : Dict = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__snake_case )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__snake_case , )
def lowercase ( self ) -> Optional[Any]:
        '''Post the main report message to Slack.'''
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
_snake_case : List[str] = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else "All tests passed."
_snake_case : Tuple = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__snake_case , )
def lowercase ( self , __snake_case , __snake_case , __snake_case , __snake_case ) -> Any:
        '''Build the thread-reply blocks for a single job's failures.'''
_snake_case : str = ""
for key, value in failures.items():
_snake_case : Any = value[:2_0_0] + " [Truncated]" if len(__snake_case ) > 2_5_0 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
_snake_case : str = job_name
_snake_case : List[str] = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
_snake_case : Union[str, Any] = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowercase ( self ) -> Optional[Any]:
        '''Reply in-thread with per-job failure details.'''
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
_snake_case : Optional[int] = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
        _snake_case : Tuple = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
_snake_case : Tuple = f"""*Num failures* :{len(job_result['failed'] )} \n"""
_snake_case : Tuple = job_result["failures"]
_snake_case : Tuple = self.get_reply_blocks(__snake_case , __snake_case , __snake_case , text=__snake_case )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f"""Results for {job}""" , blocks=__snake_case , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def A ( ):
_snake_case : Optional[Any] = os.environ["GITHUB_RUN_ID"]
_snake_case : Union[str, Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_snake_case : List[Any] = requests.get(UpperCAmelCase ).json()
_snake_case : str = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_snake_case : Union[str, Any] = math.ceil((result["total_count"] - 100) / 100 )
for i in range(UpperCAmelCase ):
_snake_case : Any = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , UpperCAmelCase )
return {}
def A ( UpperCAmelCase ):
_snake_case : Optional[int] = {}
if os.path.exists(UpperCAmelCase ):
_snake_case : Optional[Any] = os.listdir(UpperCAmelCase )
for file in files:
try:
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , encoding="utf-8" ) as f:
_snake_case : Optional[Any] = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(UpperCAmelCase , UpperCAmelCase )}.""" ) from e
return _artifact
def A ( ):
class _a:
def __init__( self , __snake_case ) -> List[Any]:
            '''Track an artifact name and the paths found for it.'''
_snake_case : Any = name
_snake_case : Any = []
def __str__( self ) -> Tuple:
            '''Return the artifact name.'''
return self.name
def lowercase ( self , __snake_case ) -> List[Any]:
            '''Register a directory path for this artifact.'''
self.paths.append({"name": self.name, "path": path} )
_snake_case : Dict[str, Artifact] = {}
_snake_case : Any = filter(os.path.isdir , os.listdir() )
for directory in directories:
_snake_case : Optional[int] = directory
if artifact_name not in _available_artifacts:
_snake_case : Optional[Any] = Artifact(UpperCAmelCase )
_available_artifacts[artifact_name].add_path(UpperCAmelCase )
return _available_artifacts
if __name__ == "__main__":
__lowerCAmelCase :str = get_job_links()
__lowerCAmelCase :Optional[int] = retrieve_available_artifacts()
__lowerCAmelCase :Dict = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCAmelCase :Any = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCAmelCase :str = github_actions_job_links.get('run_doctests')
__lowerCAmelCase :List[Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
__lowerCAmelCase :Optional[Any] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase :Dict = handle_test_results(artifact['stats'])
__lowerCAmelCase :List[Any] = failed
__lowerCAmelCase :Optional[int] = success
__lowerCAmelCase :str = time_spent[1:-1] + ', '
__lowerCAmelCase :Optional[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
__lowerCAmelCase :Any = line.replace('FAILED ', '')
__lowerCAmelCase :int = line.split()[0].replace('\n', '')
if "::" in line:
__lowerCAmelCase , __lowerCAmelCase :List[str] = line.split('::')
else:
__lowerCAmelCase , __lowerCAmelCase :int = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCAmelCase :Any = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCAmelCase :Union[str, Any] = all_failures[test] if test in all_failures else 'N/A'
__lowerCAmelCase :Optional[Any] = failure
break
__lowerCAmelCase :Optional[int] = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 278
| 1
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        # in the vocab defined in setUp: "low" -> 14, "er</w>" -> 15, "<unk>" -> 20
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Runs the same tests as above, but with ftfy and spacy installed."""

    pass
| 239
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"
    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
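# Minimal usage sketch (assumption: the class is exported as TimesformerConfig,
# as restored above):
# >>> config = TimesformerConfig(num_frames=16)
# >>> config.attention_type
# 'divided_space_time'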
| 239
| 1
|
def power(base: int, exponent: int) -> float:
    """Recursively computes base**exponent for a non-negative exponent."""
    return base * power(base, exponent - 1) if exponent else 1
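# Worked examples (exact integer math, so these hold as written):
# >>> power(2, 3)
# 8
# >>> power(5, 0)
# 1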
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(F"""{base} to the power of {exponent} is {result}""")
| 701
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : str = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
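    # Sketch of the dynamic-axes mapping produced above (without past keys):
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])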
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 77
| 0
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
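# Usage sketch (any torch.nn.Module works here; 10*10 weights + 10 biases = 110):
# >>> import torch.nn as nn
# >>> count_trainable_parameters(nn.Linear(10, 10))
# 110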
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 384
|
'''simple docstring'''
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
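# For an n-qubit GHZ state the counts concentrate on the all-zeros and all-ones
# bitstrings; an illustrative, shot-noise dependent result for 3 qubits:
# {'000': 505, '111': 495}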
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
| 384
| 1
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__lowerCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 129
|
from ..utils import DummyObject, requires_backends
# Class names below follow transformers' dummy speech objects module.
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):  # descriptive name; the original is not recoverable from this fragment
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=_a )
__SCREAMING_SNAKE_CASE = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(_a )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(_a )
__SCREAMING_SNAKE_CASE = jax.random.split(_a, _a )
__SCREAMING_SNAKE_CASE = shard(_a )
__SCREAMING_SNAKE_CASE = pipeline(_a, _a, _a, _a, jit=_a ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(_a, dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
__SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_a ) == num_samples
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=_a )
__SCREAMING_SNAKE_CASE = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 50
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(_a )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(_a )
__SCREAMING_SNAKE_CASE = jax.random.split(_a, _a )
__SCREAMING_SNAKE_CASE = shard(_a )
__SCREAMING_SNAKE_CASE = pipeline(_a, _a, _a, _a, jit=_a ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(_a, dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=_a )
__SCREAMING_SNAKE_CASE = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 50
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(_a )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(_a )
__SCREAMING_SNAKE_CASE = jax.random.split(_a, _a )
__SCREAMING_SNAKE_CASE = shard(_a )
__SCREAMING_SNAKE_CASE = pipeline(_a, _a, _a, _a, jit=_a ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(_a, dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa )
__SCREAMING_SNAKE_CASE = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 50
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(_a )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(_a )
__SCREAMING_SNAKE_CASE = jax.random.split(_a, _a )
__SCREAMING_SNAKE_CASE = shard(_a )
__SCREAMING_SNAKE_CASE = pipeline(_a, _a, _a, _a, jit=_a ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(_a, dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=_a, steps_offset=1, )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, scheduler=_a, safety_checker=_a, )
__SCREAMING_SNAKE_CASE = scheduler.create_state()
__SCREAMING_SNAKE_CASE = scheduler_state
__SCREAMING_SNAKE_CASE = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = 50
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(_a )
# shard inputs and rng
__SCREAMING_SNAKE_CASE = replicate(_a )
__SCREAMING_SNAKE_CASE = jax.random.split(_a, _a )
__SCREAMING_SNAKE_CASE = shard(_a )
__SCREAMING_SNAKE_CASE = pipeline(_a, _a, _a, _a, jit=_a ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(_a, dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__SCREAMING_SNAKE_CASE = jax.device_count()
__SCREAMING_SNAKE_CASE = num_samples * [prompt]
__SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0 ), _a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=_a, )
__SCREAMING_SNAKE_CASE = replicate(_a )
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(_a )
__SCREAMING_SNAKE_CASE = shard(_a )
__SCREAMING_SNAKE_CASE = pipeline(_a, _a, _a, jit=_a ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
__SCREAMING_SNAKE_CASE = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=_a, use_memory_efficient_attention=_a, )
__SCREAMING_SNAKE_CASE = replicate(_a )
__SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(_a )
__SCREAMING_SNAKE_CASE = shard(_a )
__SCREAMING_SNAKE_CASE = pipeline(_a, _a, _a, jit=_a ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
__SCREAMING_SNAKE_CASE = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 693
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
return TaConfig(
vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config(self):
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fpaa_forward(self, config, input_dict):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"],
            )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
    def test_model_fpaa_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 693
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
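# Usage sketch: moves a tensor to its new key in-place (illustrative values).
# >>> d = {"module.cls_token": 1}
# >>> rename_key(d, "module.cls_token", "vit.embeddings.cls_token")
# >>> d
# {'vit.embeddings.cls_token': 1}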
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__a : List[str] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 709
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__a : int = ["""small""", """medium""", """large"""]
__a : List[Any] = """lm_head.decoder.weight"""
__a : Optional[int] = """lm_head.weight"""
def a_ ( __snake_case , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase_ = torch.load(__snake_case )
UpperCamelCase_ = d.pop(__snake_case )
os.makedirs(__snake_case , exist_ok=__snake_case )
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
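# Sketch of the transformation above: the pickled DialoGPT state dict is kept
# as-is except the tied LM head, which is renamed from 'lm_head.decoder.weight'
# to 'lm_head.weight' before saving under the standard WEIGHTS_NAME.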
if __name__ == "__main__":
__a : Tuple = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
__a : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 559
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : List[Any] = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
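# Minimal usage sketch (assumption: the class is exported as CanineConfig,
# as restored above):
# >>> config = CanineConfig()
# >>> config.num_hash_buckets
# 16384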
| 73
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 663
| 0
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def __UpperCamelCase ( *_A : int , **_A : Tuple ) ->Any:
"""simple docstring"""
return AutoConfig.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __UpperCamelCase ( *_A : Tuple , **_A : Optional[Any] ) ->int:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModel.__doc__ )
def __UpperCamelCase ( *_A : Union[str, Any] , **_A : Union[str, Any] ) ->int:
"""simple docstring"""
return AutoModel.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __UpperCamelCase ( *_A : List[str] , **_A : List[str] ) ->Optional[Any]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __UpperCamelCase ( *_A : Any , **_A : int ) ->str:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __UpperCamelCase ( *_A : int , **_A : List[Any] ) ->Optional[Any]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __UpperCamelCase ( *_A : str , **_A : List[Any] ) ->Any:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*_A , **_A )
| 75
|
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
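# Usage sketch for the adjacency-list graph above (edges are stored as [weight, vertex]):
# >>> g = DirectedGraph()
# >>> g.add_pair(0, 1)
# >>> g.add_pair(1, 2)
# >>> g.all_nodes()
# [0, 1, 2]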
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the count of nodes you want; leave it or pass -1 for a random
    # count between 10 and 10010
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
def _snake_case ( self )-> str:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
# check if se have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return list(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(_SCREAMING_SNAKE_CASE )
visited.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(_SCREAMING_SNAKE_CASE ) != 0:
lowerCamelCase_ =stack[len(_SCREAMING_SNAKE_CASE ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =s
lowerCamelCase_ =ss
# check if se have reached the starting point
if len(_SCREAMING_SNAKE_CASE ) == 0:
return False
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )-> List[str]:
lowerCamelCase_ =time()
self.dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )-> List[str]:
lowerCamelCase_ =time()
self.bfs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =time()
return end - begin
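
# Minimal usage sketch of the DirectedGraph API above; the edge data is
# arbitrary example input, not taken from the original module.
def _demo_directed_graph():
    g = DirectedGraph()
    g.add_pair(1, 2)
    g.add_pair(2, 3)
    g.add_pair(3, 1)  # closes the cycle 1 -> 2 -> 3 -> 1
    assert g.all_nodes() == [1, 2, 3]
    assert g.dfs(1) == [1, 2, 3]
    assert g.bfs(1) == [1, 2, 3]
    assert g.in_degree(1) == 1 and g.out_degree(1) == 1
    assert g.has_cycle()
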
class Graph:
    def __init__(self):
        self.graph = {}

    # add an undirected edge u <-> v with optional weight w
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
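
# Companion sketch for the undirected Graph class above, with arbitrary
# example edges; every add_pair call inserts the edge in both directions.
def _demo_undirected_graph():
    g = Graph()
    g.add_pair(1, 2)
    g.add_pair(2, 3)
    assert g.all_nodes() == [1, 2, 3]
    assert g.dfs(1) == [1, 2, 3]
    assert g.bfs(1) == [1, 2, 3]
    assert g.degree(2) == 2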
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowercase__ :List[Any] = logging.get_logger(__name__)
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Optional[Any]:
"""simple docstring"""
__UpperCAmelCase : Any = WavaVecaForSequenceClassification.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
__UpperCAmelCase : Any = downstream_dict['''projector.weight''']
__UpperCAmelCase : List[Any] = downstream_dict['''projector.bias''']
__UpperCAmelCase : str = downstream_dict['''model.post_net.linear.weight''']
__UpperCAmelCase : Dict = downstream_dict['''model.post_net.linear.bias''']
return model
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Optional[Any]:
"""simple docstring"""
__UpperCAmelCase : List[Any] = WavaVecaForAudioFrameClassification.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
__UpperCAmelCase : str = downstream_dict['''model.linear.weight''']
__UpperCAmelCase : List[Any] = downstream_dict['''model.linear.bias''']
return model
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Dict:
"""simple docstring"""
__UpperCAmelCase : Tuple = WavaVecaForXVector.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = downstream_dict['''connector.weight''']
__UpperCAmelCase : Optional[Any] = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__UpperCAmelCase : List[str] = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
__UpperCAmelCase : Optional[int] = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
__UpperCAmelCase : Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__UpperCAmelCase : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__UpperCAmelCase : Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__UpperCAmelCase : Tuple = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__UpperCAmelCase : Optional[Any] = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : str = torch.load(UpperCAmelCase_ , map_location='''cpu''' )
__UpperCAmelCase : List[Any] = checkpoint['''Downstream''']
__UpperCAmelCase : int = WavaVecaConfig.from_pretrained(UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ )
__UpperCAmelCase : int = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__UpperCAmelCase : int = convert_classification(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__UpperCAmelCase : Optional[int] = convert_diarization(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif arch.endswith('''ForXVector''' ):
__UpperCAmelCase : int = convert_xvector(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
__UpperCAmelCase : Tuple = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(UpperCAmelCase_ )
hf_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowercase__ :Any = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
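
# A hypothetical invocation of the converter above; all paths and the base
# model name are illustrative placeholders, not values from this script:
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model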
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
_A : Optional[int] = 'SpeechT5FeatureExtractor'
_A : List[Any] = 'SpeechT5Tokenizer'
def __init__( self : List[str] , __lowercase : List[str] , __lowercase : int ):
'''simple docstring'''
super().__init__(__lowercase , __lowercase )
def __call__( self : Any , *__lowercase : List[str] , **__lowercase : int ):
'''simple docstring'''
__UpperCAmelCase : Tuple = kwargs.pop('''audio''' , __lowercase )
__UpperCAmelCase : str = kwargs.pop('''text''' , __lowercase )
__UpperCAmelCase : List[str] = kwargs.pop('''text_target''' , __lowercase )
__UpperCAmelCase : Optional[Any] = kwargs.pop('''audio_target''' , __lowercase )
__UpperCAmelCase : Tuple = kwargs.pop('''sampling_rate''' , __lowercase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
__UpperCAmelCase : Union[str, Any] = self.feature_extractor(__lowercase , *__lowercase , sampling_rate=__lowercase , **__lowercase )
elif text is not None:
__UpperCAmelCase : Optional[int] = self.tokenizer(__lowercase , **__lowercase )
else:
__UpperCAmelCase : List[Any] = None
if audio_target is not None:
__UpperCAmelCase : Tuple = self.feature_extractor(audio_target=__lowercase , *__lowercase , sampling_rate=__lowercase , **__lowercase )
__UpperCAmelCase : Optional[int] = targets['''input_values''']
elif text_target is not None:
__UpperCAmelCase : Optional[Any] = self.tokenizer(__lowercase , **__lowercase )
__UpperCAmelCase : int = targets['''input_ids''']
else:
__UpperCAmelCase : List[str] = None
if inputs is None:
return targets
if targets is not None:
__UpperCAmelCase : Any = labels
__UpperCAmelCase : str = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
__UpperCAmelCase : List[Any] = decoder_attention_mask
return inputs
def A_ ( self : List[str] , *__lowercase : Dict , **__lowercase : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = kwargs.pop('''input_values''' , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop('''input_ids''' , __lowercase )
__UpperCAmelCase : str = kwargs.pop('''labels''' , __lowercase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
__UpperCAmelCase : Any = self.feature_extractor.pad(__lowercase , *__lowercase , **__lowercase )
elif input_ids is not None:
__UpperCAmelCase : Any = self.tokenizer.pad(__lowercase , **__lowercase )
else:
__UpperCAmelCase : List[str] = None
if labels is not None:
if "input_ids" in labels or (isinstance(__lowercase , __lowercase ) and "input_ids" in labels[0]):
__UpperCAmelCase : str = self.tokenizer.pad(__lowercase , **__lowercase )
__UpperCAmelCase : str = targets['''input_ids''']
else:
__UpperCAmelCase : Union[str, Any] = self.feature_extractor.feature_size
__UpperCAmelCase : str = self.feature_extractor.num_mel_bins
__UpperCAmelCase : List[Any] = self.feature_extractor.pad(__lowercase , *__lowercase , **__lowercase )
__UpperCAmelCase : Dict = feature_size_hack
__UpperCAmelCase : Union[str, Any] = targets['''input_values''']
else:
__UpperCAmelCase : Tuple = None
if inputs is None:
return targets
if targets is not None:
__UpperCAmelCase : Tuple = labels
__UpperCAmelCase : int = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
__UpperCAmelCase : Dict = decoder_attention_mask
return inputs
def A_ ( self : Optional[int] , *__lowercase : Optional[int] , **__lowercase : int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def A_ ( self : Union[str, Any] , *__lowercase : Dict , **__lowercase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__lowercase , **__lowercase )
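
# Minimal usage sketch; "microsoft/speecht5_tts" is a published SpeechT5
# checkpoint and the silent waveform is synthetic example data:
#
# import numpy as np
# from transformers import SpeechT5Processor
#
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
# batch = processor(
#     text="Hello world", audio_target=waveform, sampling_rate=16000, return_tensors="pt"
# )
# # -> `input_ids` for the text plus spectrogram `labels` (and, when present,
# #    a `decoder_attention_mask`) for the audio target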
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
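
# Worked example of the special-token layout implemented above, for a pair of
# sequences with (arbitrary) token ids A = [5, 6] and B = [7]:
#   build_inputs_with_special_tokens(A, B)      -> [CLS] 5 6 [SEP] 7 [SEP]
#   get_special_tokens_mask(A, B)               -> [1, 0, 0, 1, 0, 1]
#   create_token_type_ids_from_sequences(A, B)  -> [0, 0, 0, 0, 1, 1]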
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
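
# Worked example for compute_f1: with a_gold = "the cat sat" and
# a_pred = "cat sat down", normalize_answer drops the article "the", giving
# gold_toks = ["cat", "sat"] and pred_toks = ["cat", "sat", "down"].
# Then num_same = 2, precision = 2/3, recall = 2/2 = 1.0, and
# F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.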
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
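
# Minimal usage sketch; "google/ncsnpp-celebahq-256" is a published score-SDE
# checkpoint and is used here only as an example:
#
# from diffusers import ScoreSdeVePipeline
#
# sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = sde_ve(num_inference_steps=2000).images[0]
# image.save("sde_ve_generated_image.png")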
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        self.root = None  # stays None for an empty collection
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index i in O(log N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the inclusive range [i, j] in O(log N) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_a = data_utils.TransfoXLTokenizer
_a = data_utils.TransfoXLCorpus
_a = data_utils
_a = data_utils
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> Optional[int]:
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__snake_case, '''rb''' ) as fp:
_UpperCamelCase = pickle.load(__snake_case, encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_UpperCamelCase = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
_UpperCamelCase = corpus.vocab.__dict__
torch.save(__snake_case, __snake_case )
_UpperCamelCase = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''', __snake_case )
_UpperCamelCase = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(__snake_case, __snake_case )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_UpperCamelCase = os.path.abspath(__snake_case )
_UpperCamelCase = os.path.abspath(__snake_case )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_UpperCamelCase = TransfoXLConfig()
else:
_UpperCamelCase = TransfoXLConfig.from_json_file(__snake_case )
print(F'''Building PyTorch model from configuration: {config}''' )
_UpperCamelCase = TransfoXLLMHeadModel(__snake_case )
_UpperCamelCase = load_tf_weights_in_transfo_xl(__snake_case, __snake_case, __snake_case )
# Save pytorch-model
_UpperCamelCase = os.path.join(__snake_case, __snake_case )
_UpperCamelCase = os.path.join(__snake_case, __snake_case )
print(F'''Save PyTorch model to {os.path.abspath(__snake_case )}''' )
torch.save(model.state_dict(), __snake_case )
print(F'''Save configuration file to {os.path.abspath(__snake_case )}''' )
with open(__snake_case, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
_a = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
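
# A hypothetical invocation of the script above (paths are placeholders):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json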
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) )
_UpperCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) )
_UpperCamelCase = 0.01
with locka.acquire():
with pytest.raises(__snake_case ):
_UpperCamelCase = time.time()
locka.acquire(__snake_case )
assert time.time() - _start > timeout
def lowerCamelCase__ ( __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = '''a''' * 10_00 + '''.lock'''
_UpperCamelCase = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(__snake_case )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
_UpperCamelCase = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__snake_case ):
locka.acquire(0 )
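
# Sketch of the behaviour exercised above: a FileLock on a given path
# serialises critical sections, and acquire(timeout) raises Timeout when the
# lock is already held elsewhere.
#
# with FileLock("/tmp/some_resource.lock"):  # path is an illustrative example
#     ...  # exclusive access here; a concurrent acquire(0) raises Timeout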
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
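
# Minimal usage sketch; "facebook/rag-token-nq" is a published RAG checkpoint
# and `generated_ids` stands in for the output of a RAG model:
#
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
# answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)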
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
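
# Minimal usage sketch; "CIDAS/clipseg-rd64-refined" is the published CLIPSeg
# checkpoint and the blank image is synthetic example data:
#
# from PIL import Image
# from transformers import CLIPSegProcessor
#
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# image = Image.new("RGB", (352, 352))
# inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
# # -> contains `input_ids`, `attention_mask` and `pixel_values`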
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
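
# For the driver input above (n = 5, r = 3), the recursion prints
# C(5, 3) = 5! / (3! * 2!) = 10 combinations, each on its own line.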
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
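
# A minimal sketch of the config in use; the asserts restate the defaults and
# the `attribute_map` aliases defined above:
#
# config = Speech2TextConfig()
# assert config.hidden_size == config.d_model == 256
# assert config.num_attention_heads == config.encoder_attention_heads == 4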
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
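# A short sketch of what the `_LazyModule` indirection above buys, assuming
# the published `transformers` package: submodules are imported only when an
# attribute is first accessed, so importing the package root stays cheap.
if __name__ == "__main__":
    import transformers

    config = transformers.GroupViTConfig()  # first access triggers the lazy import
    print(type(config).__name__)  # GroupViTConfig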
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of
        # logging; we force it to False since the tests below depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if we use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
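# A minimal sketch of the pattern exercised by the tests above, assuming the
# public Trainer API: any `TrainerCallback` subclass can be attached at init
# time or added afterwards with `trainer.add_callback`.
if __name__ == "__main__":
    from transformers import TrainerCallback

    class PrintStepCallback(TrainerCallback):
        def on_step_end(self, args, state, control, **kwargs):
            print(f"finished step {state.global_step}")

    # Attach it either way (trainer construction omitted here):
    #   trainer = Trainer(model, args, callbacks=[PrintStepCallback])
    #   trainer.add_callback(PrintStepCallback())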
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a key function so that comparison ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
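# Usage sketch (assuming this script lives at utils/custom_init_isort.py, as
# similar checkers do in this codebase):
#
#     python utils/custom_init_isort.py --check_only   # raise if any __init__ is unsorted
#     python utils/custom_init_isort.py                # rewrite offending __init__.py files
#
# For the ordering rule itself, sort_objects(["load_model", "CONFIG_NAME", "AutoModel"])
# returns ["CONFIG_NAME", "AutoModel", "load_model"]: constants first, then classes,
# then functions, each compared case- and underscore-insensitively.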
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a data file into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
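# A minimal sketch of how the abstract task above is typically filled in (the
# NER subclass in the accompanying example scripts follows this shape; the
# file format here, one "token label" pair per line, is an assumption made
# for illustration).
class SimpleNER(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        path = os.path.join(data_dir, f"{mode.value if isinstance(mode, Split) else mode}.txt")
        examples, words, labels = [], [], []
        with open(path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:  # a blank line ends a sentence
                    if words:
                        examples.append(InputExample(guid=str(len(examples)), words=words, labels=labels))
                        words, labels = [], []
                else:
                    token, label = line.split()
                    words.append(token)
                    labels.append(label)
        if words:
            examples.append(InputExample(guid=str(len(examples)), words=words, labels=labels))
        return examples

    @staticmethod
    def get_labels(path):
        # A hypothetical fixed label set for the sketch; real tasks read this from `path`.
        return ["O", "B-ENT", "I-ENT"]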
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
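# A minimal sketch of what `RagTokenizer` does outside these tests (assuming
# the published checkpoint): it wraps a DPR question-encoder tokenizer and a
# BART generator tokenizer behind a single __call__.
if __name__ == "__main__":
    from transformers import RagTokenizer

    tok = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    batch = tok(["who got the first nobel prize in physics"])
    print(batch["input_ids"][:1])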
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
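# A small sketch of how the ONNX config above is consumed, assuming the public
# `transformers` export utilities: `inputs` declares the dynamic axes used
# when tracing and exporting the model.
if __name__ == "__main__":
    onnx_config = RobertaPreLayerNormOnnxConfig(RobertaPreLayerNormConfig(), task="sequence-classification")
    print(dict(onnx_config.inputs))  # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {...}}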
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2, seq_length=8, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16,
        num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
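# A minimal inference sketch matching the integration tests above (assuming
# the `uw-madison/mra-base-512-4` checkpoint on the Hub is reachable):
if __name__ == "__main__":
    import torch
    from transformers import MraForMaskedLM

    model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
    input_ids = torch.arange(256).unsqueeze(0)
    with torch.no_grad():
        logits = model(input_ids)[0]
    print(logits.shape)  # torch.Size([1, 256, 50265])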
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
def __lowercase ( self , lowercase) -> Union[str, Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}')
a__ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files)
if isinstance(SCREAMING_SNAKE_CASE_ , (str, list, tuple)):
a__ : Dict = data_files
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
a__ : Any = [files]
a__ : str = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
a__ : str = []
for split_name, files in data_files.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
a__ : str = [files]
a__ : List[Any] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_) for file in files]
splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE_ , gen_kwargs={'files': files}))
return splits
def __lowercase ( self , lowercase) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
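
# For context, a minimal sketch of how this builder is typically exercised
# through the `datasets` API (the file names here are placeholders):
from datasets import load_dataset

# A single JSON document whose "data" field holds the records
# -> handled by the `self.config.field is not None` branch above.
ds = load_dataset("json", data_files="my_file.json", field="data")

# JSON Lines (one object per line)
# -> read in chunks with pyarrow.json in the `else` branch above.
ds_jsonl = load_dataset("json", data_files="my_file.jsonl")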
| 302
|
import unittest

from knapsack import knapsack as k


class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
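
# The tests above assume a module `knapsack` exposing
# knapsack(capacity, weights, values, counter). A minimal recursive 0/1
# knapsack sketch consistent with that signature (the module under test may
# differ in implementation details):
def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    # Base case: no items left to consider, or no remaining capacity
    if counter == 0 or capacity == 0:
        return 0
    # If the counter-th item is too heavy, it cannot be included
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise return the better of including or skipping the item
    include = values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1)
    exclude = knapsack(capacity, weights, values, counter - 1)
    return max(include, exclude)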
| 97
| 0
|
"""OpenAI GPT model fine-tuning script on the ROCStories dataset."""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Builds Transformer inputs of shape (n_batch, 2, input_len) where, for each example and continuation:
    input_ids[i, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
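
# A hypothetical invocation of this script (the ROCStories CSV paths are
# placeholders and must point at locally downloaded copies of the dataset):
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train \
#     --do_eval \
#     --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#     --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#     --output_dir ../log \
#     --train_batch_size 16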
| 68
|
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.

    The first permutation is always 0, 1, 2, ..., n-1:
    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]

    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: decompose k in the factorial number system
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
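
# Worked example: kth_permutation(10, 4) decomposes k = 10 in the factorial
# number system using the factorials [3!, 2!, 1!] = [6, 2, 1]:
#   10 = 1*3! + 4   -> pick index 1 of [0, 1, 2, 3]  -> 1
#    4 = 2*2! + 0   -> pick index 2 of [0, 2, 3]     -> 3
#    0 = 0*1! + 0   -> pick index 0 of [0, 2]        -> 0
#   last remaining element                           -> 2
# giving the permutation [1, 3, 0, 2].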
| 68
| 1
|
def max_product_subarray(numbers: list) -> int:
    """
    Returns the maximum product that can be obtained by multiplying a
    contiguous subarray of the given list of integers.

    >>> max_product_subarray([2, 3, -2, 4])
    6
    >>> max_product_subarray([-2, 0, -1])
    0
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
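
# Why the minimum running product is tracked as well: a large negative running
# product becomes the maximum after one more negative factor. Trace for
# [2, 3, -2, 4, -1]:
#   after 3  -> max=6,  min=3
#   after -2 -> max=-2, min=-12   (the sign flip swaps the two roles)
#   after 4  -> max=4,  min=-48
#   after -1 -> max=48            (the minimum -48 times -1)
assert max_product_subarray([2, 3, -2, 4, -1]) == 48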
| 98
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
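
# A minimal usage sketch of the processor exercised by these tests, outside the
# test harness (the image path is a placeholder; DetaImageProcessor requires a
# transformers release that includes the DETA model):
from PIL import Image
from transformers import DetaImageProcessor

processor = DetaImageProcessor()
image = Image.open("example.jpg")
inputs = processor(images=image, return_tensors="pt")  # resize, rescale, normalize, pad
print(inputs["pixel_values"].shape)  # (1, 3, height, width)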
| 415
| 0
|
"""Tests for the ONNX Stable Diffusion img2img pipeline."""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 716
|
"""
Implementation of the Gaussian Error Linear Unit (GELU) activation function,
approximated as x * sigmoid(1.702 * x).
* https://arxiv.org/abs/1606.08415
"""
import numpy as np


def sigmoid(vector: np.array) -> np.array:
    """
    Sigmoid activation 1 / (1 + e^-x), applied elementwise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    """
    Sigmoid approximation of GELU: x * sigmoid(1.702 * x), applied elementwise.

    >>> gaussian_error_linear_unit(np.array([-1.0, 1.0, 2.0]))
    array([-0.15420423,  0.84579577,  1.93565862])
    """
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
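
# A quick sanity check (my addition, not part of the original file) comparing
# the 1.702-sigmoid approximation against the exact erf-based GELU,
# x/2 * (1 + erf(x / sqrt(2))); assumes scipy is installed:
from scipy.special import erf


def gelu_exact(vector: np.array) -> np.array:
    return 0.5 * vector * (1 + erf(vector / np.sqrt(2)))


x = np.linspace(-4, 4, 81)
print(np.max(np.abs(gelu_exact(x) - gaussian_error_linear_unit(x))))
# the two agree to within roughly 2e-2 over this range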
| 216
| 0
|