code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__lowerCamelCase : Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
# Trainer subclass for question answering with pytorch-quantization support:
# post-training calibration, quantization-aware evaluation/prediction, and
# ONNX export of the calibrated model.
# NOTE(review): this chunk has been mechanically renamed -- locals are assigned
# to `UpperCamelCase` but read back under their original names (`calib_dataset`,
# `model`, `output`, `metrics`, ...), and several defs repeat the parameter name
# `A_` (a SyntaxError). It cannot run as written; comments below describe the
# evident intent.
class A__ ( __snake_case ):
    def __init__( self , *A_ , A_=None , A_=None , A_=None , **A_ ):
        '''Store QA/quantization extras on top of the stock Trainer state:
        the raw eval examples, the prediction post-processing hook, and the
        quant-trainer argument bundle.'''
        super().__init__(*A_ , **A_ )
        UpperCamelCase : Tuple = eval_examples
        UpperCamelCase : Dict = post_process_function
        UpperCamelCase : Union[str, Any] = quant_trainer_args
        UpperCamelCase : Any = 128  # default number of calibration samples

    def __UpperCamelCase( self , A_=None ):
        '''Build a DataLoader over the calibration dataset, falling back to
        ``self.calib_dataset`` when none is passed; raises if neither exists.'''
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset." )
        UpperCamelCase : Any = calib_dataset if calib_dataset is not None else self.calib_dataset
        # Drop dataset columns the model's forward() does not accept.
        UpperCamelCase : Dict = self._remove_unused_columns(A_ , description="Calibration" )
        return DataLoader(
            A_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A_ , )

    def __UpperCamelCase( self , A_=None ):
        '''Run post-training calibration: enable quantization observers and feed
        up to ``self.calib_num`` samples through the model, then freeze the
        collected ranges via ``quant_trainer.finish_calibration``.'''
        UpperCamelCase : Any = self.train_dataset if calib_dataset is None else calib_dataset
        UpperCamelCase : Dict = self.get_calib_dataloader(A_ )
        UpperCamelCase : Optional[int] = self.model
        quant_trainer.configure_model(A_ , self.quant_trainer_args , calib=A_ )
        model.eval()
        quant_trainer.enable_calibration(A_ )
        logger.info("***** Running calibration *****" )
        logger.info(F""" Num examples = {self.calib_num}""" )
        logger.info(F""" Batch size = {calib_dataloader.batch_size}""" )
        for step, inputs in enumerate(A_ ):
            # Prediction step
            UpperCamelCase : List[Any] = self.prediction_step(A_ , A_ , prediction_loss_only=A_ )
            # Stop once enough samples have been observed for calibration.
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(A_ , self.quant_trainer_args )
        UpperCamelCase : str = model

    def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_ = "eval" ):
        '''Evaluate: run the prediction loop with metric computation disabled,
        then post-process raw predictions into answers and compute QA metrics,
        prefixing metric keys with ``metric_key_prefix``.'''
        UpperCamelCase : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
        UpperCamelCase : Any = self.get_eval_dataloader(A_ )
        UpperCamelCase : List[Any] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCamelCase : List[Any] = self.compute_metrics
        UpperCamelCase : Dict = None
        UpperCamelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            UpperCamelCase : int = eval_loop(
                A_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , )
        finally:
            # Always restore the metric function, even if the loop raised.
            UpperCamelCase : Union[str, Any] = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            UpperCamelCase : List[str] = self.post_process_function(A_ , A_ , output.predictions )
            UpperCamelCase : Dict = self.compute_metrics(A_ )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"""{metric_key_prefix}_""" ):
                    UpperCamelCase : Optional[int] = metrics.pop(A_ )
            self.log(A_ )
        else:
            UpperCamelCase : List[Any] = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        UpperCamelCase : Tuple = self.callback_handler.on_evaluate(self.args , self.state , self.control , A_ )
        return metrics

    def __UpperCamelCase( self , A_ , A_ , A_=None , A_ = "test" ):
        '''Predict on a test set; returns a ``PredictionOutput`` whose metrics
        are post-processed and prefixed with ``metric_key_prefix``.'''
        UpperCamelCase : Optional[int] = self.get_test_dataloader(A_ )
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCamelCase : int = self.compute_metrics
        UpperCamelCase : Tuple = None
        UpperCamelCase : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            UpperCamelCase : Any = eval_loop(
                A_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , )
        finally:
            # Restore the metric function regardless of loop outcome.
            UpperCamelCase : Dict = compute_metrics
        # Without post-processing or metrics there is nothing more to do.
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        UpperCamelCase : List[str] = self.post_process_function(A_ , A_ , output.predictions , "predict" )
        UpperCamelCase : Optional[Any] = self.compute_metrics(A_ )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"""{metric_key_prefix}_""" ):
                UpperCamelCase : Tuple = metrics.pop(A_ )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A_ )

    def __UpperCamelCase( self , A_="./" ):
        '''Export the (quantized) model to ONNX using one real eval batch as the
        example input; writes ``model.onnx`` under the given output directory.'''
        UpperCamelCase : str = self.eval_dataset
        UpperCamelCase : Dict = self.get_eval_dataloader(A_ )
        UpperCamelCase : List[str] = next(iter(A_ ) )
        # saving device - to make it consistent
        UpperCamelCase : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
        # convert to tuple
        UpperCamelCase : Union[str, Any] = tuple(v.to(A_ ) for k, v in batch.items() )
        logger.info("Converting model to be onnx compatible" )
        from pytorch_quantization.nn import TensorQuantizer
        UpperCamelCase : str = True
        UpperCamelCase : int = self.model.to(A_ )
        model.eval()
        model.float()
        # Unwrap DataParallel/DistributedDataParallel if present.
        UpperCamelCase : Tuple = model.module if hasattr(A_ , "module" ) else model
        quant_trainer.configure_model(A_ , self.quant_trainer_args )
        UpperCamelCase : int = os.path.join(A_ , "model.onnx" )
        logger.info(F"""exporting model to {output_model_file}""" )
        # Batch and sequence dimensions are dynamic in the exported graph.
        UpperCamelCase : List[Any] = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            A_ , A_ , A_ , export_params=A_ , opset_version=13 , do_constant_folding=A_ , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            } , verbose=A_ , )
        logger.info("onnx export finished" )
| 707
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
# Helper that builds small TFConvBert configs/inputs and runs shape checks for
# each task head. Used by the @require_tf test class below.
# NOTE(review): `__init__` accepts many arguments but then assigns hardcoded
# constants (13, 7, 99, 384, ...) to scrambled `UpperCamelCase` locals instead
# of storing the arguments -- the passed values are effectively ignored.
class A__ :
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
        '''Record the parent test case and the (hardcoded) model dimensions
        used to build tiny ConvBERT configs.'''
        UpperCamelCase : Dict = parent
        UpperCamelCase : str = 13
        UpperCamelCase : int = 7
        UpperCamelCase : str = True
        UpperCamelCase : Dict = True
        UpperCamelCase : str = True
        UpperCamelCase : Tuple = True
        UpperCamelCase : List[str] = 99
        UpperCamelCase : Optional[Any] = 384
        UpperCamelCase : Tuple = 2
        UpperCamelCase : Union[str, Any] = 4
        UpperCamelCase : Dict = 37
        UpperCamelCase : Any = "gelu"
        UpperCamelCase : List[Any] = 0.1
        UpperCamelCase : int = 0.1
        UpperCamelCase : Tuple = 512
        UpperCamelCase : List[Any] = 16
        UpperCamelCase : int = 2
        UpperCamelCase : Dict = 0.02
        UpperCamelCase : Optional[Any] = 3
        UpperCamelCase : List[Any] = 4
        UpperCamelCase : Dict = 128
        UpperCamelCase : Optional[Any] = 2
        UpperCamelCase : Optional[int] = 9
        UpperCamelCase : Optional[int] = 1
        UpperCamelCase : Union[str, Any] = None

    def __UpperCamelCase( self ):
        '''Create a ConvBertConfig plus random input ids, masks and labels
        sized from the tester's dimensions.'''
        UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : str = None
        if self.use_input_mask:
            UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : Tuple = None
        if self.use_token_type_ids:
            UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : List[Any] = None
        if self.use_labels:
            UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase : Any = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Base model: check last_hidden_state shape for dict and list inputs.'''
        UpperCamelCase : str = TFConvBertModel(config=A_ )
        UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        UpperCamelCase : Optional[int] = [input_ids, input_mask]
        UpperCamelCase : Any = model(A_ )
        UpperCamelCase : int = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Masked-LM head: logits over the vocabulary per position.'''
        UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ )
        UpperCamelCase : int = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Dict = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Sequence-classification head: one logit vector per example.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Multiple-choice head: inputs are tiled across the choice dimension.'''
        UpperCamelCase : List[str] = self.num_choices
        UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ )
        UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Token-classification head: per-token label logits.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : str = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''QA head: per-token start/end logits.'''
        UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ )
        UpperCamelCase : Union[str, Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Union[str, Any] = model(A_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCamelCase( self ):
        '''Unpack prepare_config_and_inputs() into the (config, inputs_dict)
        pair expected by the common test mixin.'''
        UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) : Optional[Any] = config_and_inputs
        UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    # All TF ConvBERT model classes exercised by the common model-test mixin.
    _UpperCAmelCase :Dict = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline task -> model class mapping for the pipeline test mixin.
    _UpperCAmelCase :Optional[Any] = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the common mixin (all disabled here).
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :int = False
    _UpperCAmelCase :str = False

    def __UpperCamelCase( self ):
        '''Create the model tester and a config tester for ConvBertConfig.'''
        UpperCamelCase : Dict = TFConvBertModelTester(self )
        UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 )

    def __UpperCamelCase( self ):
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()

    def __UpperCamelCase( self ):
        '''Base model shape test.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )

    def __UpperCamelCase( self ):
        '''Masked-LM head shape test.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A_ )

    def __UpperCamelCase( self ):
        '''Multiple-choice head shape test.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A_ )

    def __UpperCamelCase( self ):
        '''Question-answering head shape test.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )

    def __UpperCamelCase( self ):
        '''Sequence-classification head shape test.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A_ )

    def __UpperCamelCase( self ):
        '''Token-classification head shape test.'''
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )

    @slow
    def __UpperCamelCase( self ):
        '''Round-trip each model class through TF SavedModel and verify that
        reloaded outputs expose hidden states and attentions of the expected
        counts and shapes.'''
        UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Optional[Any] = True
        UpperCamelCase : Any = True
        if hasattr(A_ , "use_cache" ):
            UpperCamelCase : List[str] = True
        UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ )
        for model_class in self.all_model_classes:
            UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ )
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Optional[int] = len(model(A_ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(A_ , saved_model=A_ )
                UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" )
                UpperCamelCase : Dict = tf.keras.models.load_model(A_ )
                UpperCamelCase : str = model(A_ )
                if self.is_encoder_decoder:
                    UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"]
                    UpperCamelCase : Any = outputs["encoder_attentions"]
                else:
                    UpperCamelCase : Any = outputs["hidden_states"]
                    UpperCamelCase : List[str] = outputs["attentions"]
                self.assertEqual(len(A_ ) , A_ )
                # Hidden states = embeddings output + one per layer.
                UpperCamelCase : int = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(A_ ) , A_ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
                # ConvBERT halves the attention heads via grouped attention,
                # hence num_attention_heads / 2 in the expected shape.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def __UpperCamelCase( self ):
        '''Smoke test: the pretrained checkpoint loads.'''
        UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(A_ )

    def __UpperCamelCase( self ):
        '''Verify attentions are returned (and correctly shaped) whether
        requested via inputs or via the config, and that they come last.'''
        UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Dict = True
        UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ )
        UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ )

        def check_decoder_attentions_output(A_ ):
            # Decoder outputs must pair up (attentions + cross-attentions).
            UpperCamelCase : Optional[Any] = len(A_ )
            self.assertEqual(out_len % 2 , 0 )
            UpperCamelCase : Any = outputs.decoder_attentions
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(A_ ):
            UpperCamelCase : Dict = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            UpperCamelCase : Union[str, Any] = True
            UpperCamelCase : List[Any] = False
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            UpperCamelCase : List[str] = len(A_ )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            if self.is_encoder_decoder:
                UpperCamelCase : int = model_class(A_ )
                UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) )
                self.assertEqual(config.output_hidden_states , A_ )
                check_decoder_attentions_output(A_ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            UpperCamelCase : Tuple = True
            UpperCamelCase : int = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            # Check attention is always last and order is fine
            UpperCamelCase : Optional[int] = True
            UpperCamelCase : List[str] = True
            UpperCamelCase : Optional[int] = model_class(A_ )
            UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) )
            self.assertEqual(model.config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
@require_tf
class A__ ( unittest.TestCase ):
    @slow
    def __UpperCamelCase( self ):
        '''Integration test: run the pretrained ConvBERT checkpoint on a tiny
        fixed input and compare a 3x3 corner of the hidden states against
        golden values (atol 1e-4).'''
        UpperCamelCase : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        UpperCamelCase : List[str] = model(A_ )[0]
        # Expected output shape: (batch=1, seq_len=6, hidden=768).
        UpperCamelCase : int = [1, 6, 768]
        self.assertEqual(output.shape , A_ )
        # Golden values recorded from a reference run of the checkpoint.
        UpperCamelCase : List[str] = tf.constant(
            [
                [
                    [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
                    [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
                    [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1e-4 )
| 38
| 0
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate

# Emit a deprecation warning at import time, pointing callers at the new
# module path (diffusers.pipelines.pipeline_utils).
deprecate(
    """pipelines_utils""",
    """0.22.0""",
    """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 708
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __snake_case ):
_UpperCAmelCase :Union[str, Any] = 'camembert'
def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ):
'''simple docstring'''
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase : List[str] = vocab_size
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : Dict = hidden_act
UpperCamelCase : str = intermediate_size
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : Union[str, Any] = max_position_embeddings
UpperCamelCase : Optional[Any] = type_vocab_size
UpperCamelCase : int = initializer_range
UpperCamelCase : List[str] = layer_norm_eps
UpperCamelCase : Dict = position_embedding_type
UpperCamelCase : int = use_cache
UpperCamelCase : List[str] = classifier_dropout
class A__ ( __snake_case ):
    """ONNX export configuration for CamemBERT.

    Fix from review: the original assigned the axis mapping to a scrambled
    throwaway local but returned ``dynamic_axis`` (never bound -> NameError);
    the local assignment is restored here.
    """

    @property
    def inputs(self):
        """Return the ONNX input spec with dynamic axes for each input tensor."""
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra `choice` dimension.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 38
| 0
|
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact Gaussian Error Linear Unit: x * 0.5 * (1 + erf(x / sqrt(2))).

    Fix from review: the scrambled original bound results to throwaway locals
    and then read unbound names ``x``/``cdf`` (NameError); the real bindings
    are restored. Named ``_gelu`` because later module code references it.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Tanh-approximated GELU (the "new" variant used by GPT-2).

    Fix from review: restores the local bindings (``x``, ``pi``, ``coeff``,
    ``cdf``) that the scrambled original assigned to throwaway names and then
    read unbound. Named ``_gelu_new`` because later module code references it.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    """Mish activation: x * tanh(softplus(x)).

    Fix from review: restores the ``x`` binding lost to scrambling; named
    ``mish`` to match the ACT2FN mapping below.
    """
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    """Fast tanh-based GELU approximation.

    Fix from review: restores the ``x``/``coeffa`` bindings lost to
    scrambling; named ``gelu_fast`` to match the ACT2FN mapping below.
    """
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044_715, x.dtype)
    coeff2 = tf.cast(0.7_978_845_608, x.dtype)  # sqrt(2/pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    """Quick GELU approximation: x * sigmoid(1.702 * x).

    Fix from review: restores the ``x``/``coeff`` bindings lost to
    scrambling; named ``quick_gelu`` to match the ACT2FN mapping below.
    """
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_aa(x):
    """Exact GELU clipped to [-10, 10] (a.k.a. gelu_10).

    Named ``gelu_aa`` because that is the identifier the ACT2FN mapping below
    references for the "gelu_10" key.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: split ``x`` in two along ``axis``, gate one half.

    Fixes from review: the scrambled original declared both parameters as
    ``_lowerCAmelCase`` (duplicate parameter names are a SyntaxError) and
    applied the sigmoid gate to the whole input instead of the second half.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
# TF >= 2.4 ships a native (optionally tanh-approximated) GELU; prefer it.
# Fix from review: the scrambled original passed an unbound name as
# `approximate` and lost the `gelu`/`gelu_new` alias targets.
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):

    def approximate_gelu_wrap(x):
        """Native keras GELU with the tanh approximation enabled."""
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    # Fall back to the hand-written implementations above.
    gelu = _gelu
    gelu_new = _gelu_new
# String name -> activation callable registry used by get_tf_activation().
# Fix from review: the scrambled original bound this dict to a throwaway name,
# leaving `ACTaFN` (read by the lookup function below) undefined.
ACTaFN = {
    """gelu""": gelu,
    """gelu_10""": gelu_aa,
    """gelu_fast""": gelu_fast,
    """gelu_new""": gelu_new,
    """glu""": glu,
    """mish""": mish,
    """quick_gelu""": quick_gelu,
    """relu""": tf.keras.activations.relu,
    """sigmoid""": tf.keras.activations.sigmoid,
    """silu""": tf.keras.activations.swish,
    """swish""": tf.keras.activations.swish,
    """tanh""": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    """Look up an activation callable by name in the ACT2FN registry.

    Raises:
        KeyError: if ``activation_string`` is not a registered activation.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""")
| 709
|
def nor_gate(input_1, input_2):
    """NOR gate: return 1 only when both inputs are 0, else 0.

    Fixes from review: the scrambled original declared both parameters as
    ``_lowerCAmelCase`` (duplicate parameter names are a SyntaxError) and
    compared ``input_a == input_a`` (always true) instead of comparing the two
    inputs. Named ``nor_gate`` because main() below calls it by that name.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    return int(input_1 == input_2 == 0)
def main() -> None:
    """Print the full truth table of the NOR gate.

    Fix from review: the scrambled original named this function ``A_`` while
    the __main__ guard calls ``main()`` (NameError); the name is restored.
    """
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""")
    print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""")
    print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""")
    print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 38
| 0
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class A__ ( __snake_case ):
    """Configuration for SegFormer (hierarchical transformer encoder +
    lightweight all-MLP decode head).

    Fixes from review: the scrambled original declared every parameter as
    ``A_`` (duplicate parameter names are a SyntaxError), assigned values to
    throwaway locals instead of ``self``, and passed unbound names as the
    warning category and the ``kwargs.get`` default. Parameter names are
    reconstructed from the assignment order in the body.
    """

    # Model-type key used by the auto-config machinery.
    model_type = 'segformer'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],            # per-block transformer depth (read-only default by convention)
        sr_ratios=[8, 4, 2, 1],         # sequence-reduction ratios per block
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to the base class."""
        super().__init__(**kwargs)
        # `reshape_last_stage` is deprecated; warn when it is explicitly disabled.
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Deprecated flag, kept for backward compatibility (defaults to True).
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A__ ( __snake_case ):
    """ONNX export configuration for SegFormer.

    Fix from review: the scrambled original named all three properties
    ``__UpperCamelCase``, so the later definitions shadowed the earlier ones
    and only the last (opset) was reachable; distinct names are restored.
    """

    # Minimum torch version whose ONNX exporter supports the required ops.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        """Dynamic-axis spec for the single `pixel_values` image input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4

    @property
    def default_onnx_opset(self):
        """Minimum ONNX opset version required for export."""
        return 12
| 710
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
# Processor combining a Blip image processor with an auto tokenizer: images
# and/or text go in, a single BatchEncoding comes out.
# NOTE(review): this chunk is mechanically scrambled -- `__call__` repeats the
# parameter name `A_` (a SyntaxError) and locals are assigned to
# `UpperCamelCase` but read back under other names; comments describe intent.
class A__ ( __snake_case ):
    # Attributes/classes consumed by ProcessorMixin.
    _UpperCAmelCase :Optional[int] = ['image_processor', 'tokenizer']
    _UpperCAmelCase :Tuple = 'BlipImageProcessor'
    _UpperCAmelCase :Optional[int] = 'AutoTokenizer'

    def __init__( self , A_ , A_ ):
        '''Wire up the image processor and tokenizer via ProcessorMixin.'''
        UpperCamelCase : str = False
        super().__init__(A_ , A_ )
        UpperCamelCase : str = self.image_processor

    def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ):
        '''Tokenize text and/or preprocess images; at least one of the two
        must be provided. Text-only calls return the tokenizer output; when
        images are present, token ids are merged into the image encoding.'''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            UpperCamelCase : int = self.tokenizer
            UpperCamelCase : Optional[int] = self.tokenizer(
                text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
            return text_encoding
        # add pixel_values
        UpperCamelCase : int = self.image_processor(A_ , return_tensors=A_ )
        if text is not None:
            UpperCamelCase : Dict = self.tokenizer(
                text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
        else:
            UpperCamelCase : Dict = None
        # Merge token ids/masks into the image-processor output.
        if text_encoding is not None:
            encoding_image_processor.update(A_ )
        return encoding_image_processor

    def __UpperCamelCase( self , *A_ , **A_ ):
        '''Forward batch_decode to the underlying tokenizer.'''
        return self.tokenizer.batch_decode(*A_ , **A_ )

    def __UpperCamelCase( self , *A_ , **A_ ):
        '''Forward decode to the underlying tokenizer.'''
        return self.tokenizer.decode(*A_ , **A_ )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def __UpperCamelCase( self ):
        '''Union of tokenizer and image-processor input names, order-preserving.'''
        UpperCamelCase : List[str] = self.tokenizer.model_input_names
        UpperCamelCase : int = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 38
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class A__ :
    """CLI arguments for training a CodeParrot model (checkpoint, datasets,
    batch sizes, optimizer/schedule settings, checkpointing).

    NOTE(review): every field below is declared under the same identifier
    `_UpperCAmelCase`, so the dataclass effectively keeps only the last one —
    the original field names were lost in an automated rename and must be
    restored before this class is usable. Help-string typo: "Learning rate fo
    training." should read "for".
    """
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
    _UpperCAmelCase :Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
    _UpperCAmelCase :Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
    _UpperCAmelCase :Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=1_0_0_0_0 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    _UpperCAmelCase :Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate fo training.'} )
    _UpperCAmelCase :Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=7_5_0 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=1_6 , metadata={'help': 'Number of gradient accumulation steps.'} )
    _UpperCAmelCase :Optional[bool] = field(
        default=__snake_case , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
    _UpperCAmelCase :Optional[int] = field(default=5_0_0_0_0 , metadata={'help': 'Maximum number of training steps.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
    _UpperCAmelCase :Optional[int] = field(default=1_0_2_4 , metadata={'help': 'Sequence lengths used for training.'} )
    _UpperCAmelCase :Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=1_0_2_4 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
    _UpperCAmelCase :Optional[str] = field(
        default=__snake_case , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
    _UpperCAmelCase :Optional[bool] = field(default=__snake_case , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class A__ :
    """CLI arguments for evaluating a trained model on a validation dataset.

    NOTE(review): all fields share the identifier `_UpperCAmelCase` (automated
    rename) — only the last declaration survives; restore real names before use.
    """
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
    _UpperCAmelCase :Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
    _UpperCAmelCase :Optional[int] = field(default=1_0_2_4 , metadata={'help': 'Length of sequences to be evaluated.'} )
    _UpperCAmelCase :Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class A__ :
    """CLI arguments for code-generation evaluation (HumanEval-style):
    sampling settings, parallelism, and output location.

    NOTE(review): all fields share the identifier `_UpperCAmelCase` (automated
    rename); restore real names before use. The help text of the
    'eval_results.json' field ("Random seed used for evaluation.") looks
    copy-pasted from the seed field — it should describe the output file.
    """
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
    _UpperCAmelCase :Optional[int] = field(default=__snake_case , metadata={'help': 'Number of workers used for code evaluation.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=__snake_case , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
    _UpperCAmelCase :Optional[bool] = field(
        default=__snake_case , metadata={'help': 'Sample from the language model\'s output distribution.'} )
    _UpperCAmelCase :Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
    _UpperCAmelCase :Optional[int] = field(default=2_5_6 , metadata={'help': 'Maximum number of newly generated tokens.'} )
    _UpperCAmelCase :Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
    _UpperCAmelCase :Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
    _UpperCAmelCase :Optional[int] = field(default=1_0 , metadata={'help': 'Number of generations to run in parallel.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=2_0_0 , metadata={'help': 'Number of completions to generate for each sample.'} )
    _UpperCAmelCase :Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'Random seed used for evaluation.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
    _UpperCAmelCase :Optional[int] = field(
        default=-1 , metadata={
            'help': (
                'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
                ' number corresponds to which GPU device id to run on.'
            )
        } , )
@dataclass
class A__ :
    """CLI arguments for dataset cleaning/filtering (line-length, alphanumeric
    fraction, near-duplicate removal) before training.

    NOTE(review): all fields share the identifier `_UpperCAmelCase` (automated
    rename); restore real names before use. "processed processed" in one help
    string is a typo.
    """
    _UpperCAmelCase :Optional[int] = field(
        default=__snake_case , metadata={
            'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
        } , )
    _UpperCAmelCase :Optional[str] = field(
        default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save processed processed dataset.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=1_0_0_0_0_0 , metadata={'help': 'Number of files to save per JSON output file.'} )
    _UpperCAmelCase :Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
    _UpperCAmelCase :Optional[float] = field(
        default=1_0_0_0 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
    _UpperCAmelCase :Optional[float] = field(
        default=1_0_0 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
    _UpperCAmelCase :Optional[float] = field(
        default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
    _UpperCAmelCase :Optional[float] = field(
        default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
    _UpperCAmelCase :Optional[float] = field(
        default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
    _UpperCAmelCase :Optional[bool] = field(
        default=__snake_case , metadata={'help': 'If True, near-duplicate samples are removed.'} )
    _UpperCAmelCase :Optional[float] = field(
        default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class A__ :
    """CLI arguments for training a new tokenizer from a base one.

    NOTE(review): all fields share the identifier `_UpperCAmelCase` (automated
    rename); restore real names before use. The 3_2_7_6_8 field's help text
    duplicates the previous field's — it presumably described the vocabulary
    size; verify against the original script.
    """
    _UpperCAmelCase :Optional[str] = field(
        default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
    _UpperCAmelCase :Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
    _UpperCAmelCase :Optional[int] = field(default=2_0_0_0_0_0 , metadata={'help': 'Number of examples to train tokenizer on.'} )
    _UpperCAmelCase :Optional[int] = field(
        default=3_2_7_6_8 , metadata={'help': 'Number of examples to train the tokenizer on.'} )
    _UpperCAmelCase :Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
    _UpperCAmelCase :Optional[bool] = field(default=__snake_case , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class A__ :
    """CLI arguments for pretokenizing a dataset and pushing it to a repo.

    NOTE(review): all fields share the identifier `_UpperCAmelCase` (automated
    rename); restore real names before use.
    """
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    _UpperCAmelCase :Optional[int] = field(default=__snake_case , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class A__ :
    """CLI arguments for initializing a fresh model from a configuration.

    NOTE(review): all fields share the identifier `_UpperCAmelCase` (automated
    rename); restore real names before use.
    """
    _UpperCAmelCase :Optional[str] = field(
        default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
    _UpperCAmelCase :Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
    _UpperCAmelCase :Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
    _UpperCAmelCase :Optional[bool] = field(default=__snake_case , metadata={'help': 'Push saved tokenizer to the hub.'} )
| 711
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
    """Feature extractor converting raw mono audio into log-mel spectrogram
    values ("audio_values") padded to a common patch length, plus an
    "audio_mask" marking real vs. padded patches.

    NOTE(review): this class was damaged by an automated rename — methods
    declare several parameters under the same name `A_` (a SyntaxError), and
    results are bound to throwaway `UpperCamelCase` locals while later lines
    read the original names (`log_spec`, `raw_speech`, `audio_features`, ...).
    Restore the original identifiers before use.
    """
    _UpperCAmelCase :Tuple = ['audio_values', 'audio_mask']
    def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ):
        """Configure spectrogram geometry and precompute the mel filter bank.

        NOTE(review): the attribute assignments below reference parameter
        names (`spectrogram_length`, `n_fft`, ...) that the broken signature
        no longer defines, and bind to locals instead of ``self`` attributes
        that the other methods read (``self.n_fft``, ``self.hop_length``,
        ``self.mel_filters``, ...).
        """
        super().__init__(
            feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , )
        UpperCamelCase : Optional[int] = spectrogram_length
        UpperCamelCase : Dict = num_channels
        UpperCamelCase : Optional[Any] = patch_size
        # freq_len: number of frequency patches per frame (feature_size / patch width).
        UpperCamelCase : str = feature_size // self.patch_size[1]
        UpperCamelCase : List[str] = n_fft
        UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate
        UpperCamelCase : Optional[int] = sampling_rate
        UpperCamelCase : int = padding_value
        # Slaney-normalized mel filter bank, transposed for use in spectrogram().
        UpperCamelCase : str = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T
    def __UpperCamelCase( self , A_ ):
        """Compute a dB-scaled log-mel spectrogram of a 1-D waveform, drop the
        last frame, and rescale into [-1, 1]."""
        UpperCamelCase : Union[str, Any] = spectrogram(
            A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        UpperCamelCase : List[Any] = log_spec[:, :-1]
        UpperCamelCase : Optional[int] = log_spec - 20.0
        UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ):
        """Featurize one waveform or a batch of waveforms into padded
        log-mel patches and (optionally) an attention mask.

        Raises:
            ValueError: on sampling-rate mismatch or multi-channel input.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    F""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        # A >1-dim ndarray is treated as an already-batched input.
        UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        UpperCamelCase : Union[str, Any] = is_batched_numpy or (
            isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(A_ , np.ndarray ):
            UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa )
        elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            UpperCamelCase : List[Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        UpperCamelCase : str = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , A_ ):
            UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        UpperCamelCase : List[str] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padding, per sample.
            UpperCamelCase : str = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa )
        # convert into correct format for padding
        UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        UpperCamelCase : List[str] = padded_audio_features * self.padding_value
        for i in range(len(A_ ) ):
            UpperCamelCase : Union[str, Any] = audio_features[i]
            # NOTE(review): the copy of `feature` into `padded_audio_features[i]`
            # was lost in the rename — only a throwaway local is assigned here.
            UpperCamelCase : Optional[int] = feature
        # return as BatchFeature
        if return_attention_mask:
            UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            UpperCamelCase : int = {"audio_values": padded_audio_features}
        UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ )
        return encoded_inputs
| 38
| 0
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class A__ ( __snake_case ):
    """
    Descriptor that mimics ``@property`` but caches the computed value on the
    instance (under ``__cached_<name>``) so the getter runs only once.
    """
    def __get__( self , obj , objtype=None ):
        # NOTE(review): the original declared duplicate `A_` parameters
        # (SyntaxError) and bound the cache lookups to throwaway locals;
        # names restored from the reads below.
        if obj is None:
            # Accessed on the class, not an instance: return the descriptor.
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute" )
        attr = "__cached_" + self.fget.__name__
        # `None` doubles as the "not yet computed" sentinel, so a getter that
        # legitimately returns None is recomputed on every access.
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def A_ ( _lowerCAmelCase ) -> int:
    """Convert a truthy/falsy string to 1 or 0 (``distutils.util.strtobool``
    semantics), case-insensitively.

    Raises:
        ValueError: if the string is not a recognized truth value.
    """
    # NOTE(review): the original assigned the lowered string to a throwaway
    # local and then read the undefined name `val` — restored here.
    val = _lowerCAmelCase.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"""invalid truth value {val!r}""" )
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True if the argument is a tensor of any supported framework
    (torch.fx proxy, torch, TensorFlow, JAX) or a NumPy ndarray.

    Framework imports are done lazily and only when the corresponding
    availability check passes.
    """
    candidate = _lowerCAmelCase
    if is_torch_fx_proxy(candidate ):
        return True
    if is_torch_available():
        import torch

        if isinstance(candidate , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(candidate , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(candidate , (jnp.ndarray, Tracer) ):
            return True
    # Fall back to NumPy, which is always importable here.
    return isinstance(candidate , np.ndarray )
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the argument is a NumPy ``ndarray``."""
    array_type = np.ndarray
    return isinstance(_lowerCAmelCase , array_type )
def A_ ( _lowerCAmelCase ) -> Tuple:
    """Test whether the argument is a NumPy array.

    NOTE(review): delegates to `_is_numpy`, whose definition in this file was
    renamed away by an automated pass — calling this as-is raises NameError.
    """
    return _is_numpy(_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the argument is a ``torch.Tensor`` (torch is imported
    lazily so this module does not require torch at import time)."""
    import torch

    tensor_type = torch.Tensor
    return isinstance(_lowerCAmelCase , tensor_type )
def A_ ( _lowerCAmelCase ) -> Tuple:
    """Test whether the argument is a torch tensor, returning False when torch
    is not installed.

    NOTE(review): delegates to `_is_torch`, whose definition in this file was
    renamed away by an automated pass — calling this as-is raises NameError.
    """
    return False if not is_torch_available() else _is_torch(_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the argument is a ``torch.device`` instance
    (torch imported lazily)."""
    import torch

    device_type = torch.device
    return isinstance(_lowerCAmelCase , device_type )
def A_ ( _lowerCAmelCase ) -> Any:
    """Test whether the argument is a torch device, returning False when torch
    is not installed.

    NOTE(review): delegates to `_is_torch_device`, whose definition in this
    file was renamed away by an automated pass — calling this raises NameError.
    """
    return False if not is_torch_available() else _is_torch_device(_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the argument is a ``torch.dtype`` or the name of one
    (e.g. ``"float32"``); torch is imported lazily.
    """
    import torch

    # NOTE(review): the original had its arguments collapsed into
    # `isinstance(x, x)` / `hasattr(x, x)` by an automated rename; restored:
    # a string is resolved to the torch attribute of the same name first.
    if isinstance(_lowerCAmelCase , str ):
        if hasattr(torch , _lowerCAmelCase ):
            _lowerCAmelCase = getattr(torch , _lowerCAmelCase )
        else:
            return False
    return isinstance(_lowerCAmelCase , torch.dtype )
def A_ ( _lowerCAmelCase ) -> str:
    """Test whether the argument is (or names) a torch dtype, returning False
    when torch is not installed.

    NOTE(review): delegates to `_is_torch_dtype`, whose definition in this
    file was renamed away by an automated pass — calling this raises NameError.
    """
    return False if not is_torch_available() else _is_torch_dtype(_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the argument is a ``tf.Tensor`` (TensorFlow imported
    lazily)."""
    import tensorflow as tf

    tensor_type = tf.Tensor
    return isinstance(_lowerCAmelCase , tensor_type )
def A_ ( _lowerCAmelCase ) -> Optional[int]:
    """Test whether the argument is a TensorFlow tensor, returning False when
    TF is not installed.

    NOTE(review): delegates to `_is_tensorflow`, whose definition in this file
    was renamed away by an automated pass — calling this raises NameError.
    """
    return False if not is_tf_available() else _is_tensorflow(_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the argument is a symbolic (graph-mode) TF tensor."""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14,
    # so the availability check must be on the `tf` module itself (the original
    # code probed the tensor argument instead, which defeated the check).
    if hasattr(tf , "is_symbolic_tensor" ):
        return tf.is_symbolic_tensor(_lowerCAmelCase )
    return type(_lowerCAmelCase ) == tf.Tensor
def A_ ( _lowerCAmelCase ) -> List[str]:
    """Test whether the argument is a symbolic TF tensor, returning False when
    TF is not installed.

    NOTE(review): delegates to `_is_tf_symbolic_tensor`, whose definition in
    this file was renamed away — calling this as-is raises NameError.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the argument is a JAX ndarray (jax imported lazily)."""
    import jax.numpy as jnp  # noqa: F811

    array_type = jnp.ndarray
    return isinstance(_lowerCAmelCase , array_type )
def A_ ( _lowerCAmelCase ) -> Union[str, Any]:
    """Test whether the argument is a JAX tensor, returning False when flax/jax
    is not installed.

    NOTE(review): delegates to `_is_jax`, whose definition in this file was
    renamed away by an automated pass — calling this as-is raises NameError.
    """
    return False if not is_flax_available() else _is_jax(_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> Optional[int]:
    """Recursively convert tensors / arrays / containers into plain Python
    objects (dicts, lists, numbers).

    NOTE(review): the body references `to_py_obj`, `obj`, and the
    `is_*_tensor` predicates by their original names, all of which were
    renamed away by an automated pass — calling this as-is raises NameError.
    """
    if isinstance(_lowerCAmelCase , (dict, UserDict) ):
        return {k: to_py_obj(_lowerCAmelCase ) for k, v in obj.items()}
    elif isinstance(_lowerCAmelCase , (list, tuple) ):
        return [to_py_obj(_lowerCAmelCase ) for o in obj]
    elif is_tf_tensor(_lowerCAmelCase ):
        return obj.numpy().tolist()
    elif is_torch_tensor(_lowerCAmelCase ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(_lowerCAmelCase ):
        return np.asarray(_lowerCAmelCase ).tolist()
    elif isinstance(_lowerCAmelCase , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def A_ ( _lowerCAmelCase ) -> Dict:
    """Recursively convert tensors / containers into NumPy arrays.

    NOTE(review): the body references `to_numpy`, `obj`, and the
    `is_*_tensor` predicates by their original names, all of which were
    renamed away by an automated pass — calling this as-is raises NameError.
    """
    if isinstance(_lowerCAmelCase , (dict, UserDict) ):
        return {k: to_numpy(_lowerCAmelCase ) for k, v in obj.items()}
    elif isinstance(_lowerCAmelCase , (list, tuple) ):
        return np.array(_lowerCAmelCase )
    elif is_tf_tensor(_lowerCAmelCase ):
        return obj.numpy()
    elif is_torch_tensor(_lowerCAmelCase ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(_lowerCAmelCase ):
        return np.asarray(_lowerCAmelCase )
    else:
        return obj
class A__ ( __snake_case ):
    """Ordered-dict-like container for model outputs: dataclass fields double
    as dict keys, mutation methods are forbidden, and integer indexing maps to
    the tuple of non-None values.

    NOTE(review): damaged by an automated rename — several dunder methods
    declare duplicate parameter names (a SyntaxError), and many results are
    bound to throwaway `UpperCamelCase` locals while later lines read the
    original names (`class_fields`, `first_field`, `inner_dict`, ...).
    """
    def __UpperCamelCase( self ):
        """Post-init hook: validate the dataclass fields and populate the
        dict view from them (or from a single dict/iterator first field)."""
        UpperCamelCase : List[Any] = fields(self )
        # Safety and consistency checks
        if not len(A_ ):
            raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
        UpperCamelCase : Any = getattr(self , class_fields[0].name )
        UpperCamelCase : Union[str, Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        # A lone non-tensor first field may itself be a dict or (key, value) iterator.
        if other_fields_are_none and not is_tensor(A_ ):
            if isinstance(A_ , A_ ):
                UpperCamelCase : str = first_field.items()
                UpperCamelCase : Dict = True
            else:
                try:
                    UpperCamelCase : Optional[int] = iter(A_ )
                    UpperCamelCase : int = True
                except TypeError:
                    UpperCamelCase : Optional[int] = False
        # if we provided an iterator as first field and the iterator is a (key, value) iterator
        # set the associated fields
        if first_field_iterator:
            for idx, element in enumerate(A_ ):
                if (
                    not isinstance(A_ , (list, tuple) )
                    or not len(A_ ) == 2
                    or not isinstance(element[0] , A_ )
                ):
                    if idx == 0:
                        # If we do not have an iterator of key/values, set it as attribute
                        UpperCamelCase : Dict = first_field
                    else:
                        # If we have a mixed iterator, raise an error
                        raise ValueError(
                            F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                    break
                setattr(self , element[0] , element[1] )
                if element[1] is not None:
                    UpperCamelCase : str = element[1]
        elif first_field is not None:
            UpperCamelCase : Optional[Any] = first_field
        else:
            # Default path: mirror every non-None field into the dict view.
            for field in class_fields:
                UpperCamelCase : Optional[Any] = getattr(self , field.name )
                if v is not None:
                    UpperCamelCase : Dict = v
    def __delitem__( self , *A_ , **A_ ):
        """Deletion is not allowed on model outputs."""
        raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
    def __UpperCamelCase( self , *A_ , **A_ ):
        """``setdefault`` is not allowed on model outputs."""
        raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
    def __UpperCamelCase( self , *A_ , **A_ ):
        """``pop`` is not allowed on model outputs."""
        raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
    def __UpperCamelCase( self , *A_ , **A_ ):
        """``update`` is not allowed on model outputs."""
        raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
    def __getitem__( self , A_ ):
        """String keys index the dict view; integer/slice keys index the
        tuple of non-None values."""
        if isinstance(A_ , A_ ):
            UpperCamelCase : List[Any] = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , A_ , A_ ):
        """Keep the attribute and dict views in sync on attribute set."""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(A_ , A_ )
        super().__setattr__(A_ , A_ )
    def __setitem__( self , A_ , A_ ):
        """Keep the attribute and dict views in sync on item set."""
        super().__setitem__(A_ , A_ )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(A_ , A_ )
    def __UpperCamelCase( self ):
        """Return the values as a plain tuple (order of the dict view)."""
        return tuple(self[k] for k in self.keys() )
class A__ ( __snake_case , __snake_case ):
    """Enum base whose ``_missing_`` hook raises a descriptive error listing
    the valid values when an invalid one is looked up."""
    @classmethod
    def _missing_( cls , value ):
        # `_value2member_map_` is the Enum-internal value -> member mapping
        # (the original code misspelled it as `_valueamember_map_`). The
        # method must be named `_missing_` for Enum to invoke it.
        raise ValueError(
            F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class A__ ( __snake_case ):
    """Possible padding strategies for tokenization.

    NOTE(review): the original declared every member under the same name
    `_UpperCAmelCase`, so only one member survived; canonical names restored
    from the string values.
    """
    LONGEST = 'longest'
    MAX_LENGTH = 'max_length'
    DO_NOT_PAD = 'do_not_pad'
class A__ ( __snake_case ):
    """Framework identifiers for requested return tensors.

    NOTE(review): the original declared every member under the same name
    `_UpperCAmelCase`, so only one member survived; canonical names restored
    from the string values.
    """
    PYTORCH = 'pt'
    TENSORFLOW = 'tf'
    NUMPY = 'np'
    JAX = 'jax'
class A__ :
    """Wrapper that enters a list of context managers on ``__enter__`` and
    exits them all (LIFO, via ``contextlib.ExitStack``) on ``__exit__``."""
    def __init__( self , context_managers ):
        # NOTE(review): the original bound these to throwaway locals while
        # __enter__/__exit__ read `self.context_managers` / `self.stack`;
        # the attribute assignments are restored here.
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
def A_ ( model_class ) -> bool:
    """Return True if ``model_class``'s forward/call signature declares a
    ``return_loss`` parameter defaulting to True.

    NOTE(review): depends on `infer_framework`, which in the original module
    is the framework-detection helper defined further down this file.
    """
    # NOTE(review): the original bound the framework to a throwaway local and
    # then read the undefined names `framework` / `model_class` — restored.
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def A_ ( model_class ) -> list:
    """List the label-argument names accepted by ``model_class``'s
    forward/call (question-answering models also get start/end positions).

    NOTE(review): depends on `infer_framework`, which in the original module
    is the framework-detection helper defined further down this file.
    """
    # NOTE(review): the original bound these to throwaway locals and then read
    # the undefined names `model_name` / `framework` / `signature` — restored.
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def A_ ( d , parent_key = "" , delimiter = "." ) -> dict:
    """Flatten a nested mapping into a single-level dict whose keys join the
    nesting path with ``delimiter`` (e.g. ``{"a": {"b": 1}}`` -> ``{"a.b": 1}``).
    """
    # NOTE(review): the original declared three parameters under the same
    # name (a SyntaxError) and the inner generator's recursion/key building
    # had its identifiers collapsed; both are restored here.
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            # Recurse into non-empty nested mappings; emit leaves directly.
            if v and isinstance(v , MutableMapping ):
                yield from _flatten_dict(v , key , delimiter=delimiter )
            else:
                yield key, v

    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def A_ ( working_dir , use_temp_dir = False ):
    """Yield a fresh temporary directory when ``use_temp_dir`` is True
    (cleaned up on exit), otherwise yield ``working_dir`` unchanged.
    """
    # NOTE(review): the original declared both parameters under the same name
    # (a SyntaxError); names restored from the body's reads.
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def A_ ( array , axes=None ):
    """Framework-agnostic transpose for numpy/torch/tf/jax arrays.

    NOTE(review): the `is_*_array`/`is_*_tensor` predicates referenced below
    exist in the original module but were renamed away in this file; the
    original also declared both parameters under the same name (SyntaxError),
    which is fixed here.
    """
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        # torch has no axes= keyword: .T for a full transpose, permute otherwise.
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"""Type not supported for transpose: {type(array )}.""" )
def A_ ( array , newshape ):
    """Framework-agnostic reshape for numpy/torch/tf/jax arrays.

    NOTE(review): the `is_*` predicates referenced below were renamed away in
    this file; the original also declared both parameters under the same name
    (SyntaxError), which is fixed here.
    """
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"""Type not supported for reshape: {type(array )}.""" )
def A_ ( array , axis=None ):
    """Framework-agnostic squeeze for numpy/torch/tf/jax arrays.

    NOTE(review): the `is_*` predicates referenced below were renamed away in
    this file; the original also declared both parameters under the same name
    (SyntaxError), which is fixed here.
    """
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        # torch uses dim= and rejects dim=None, hence the branch.
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(array )}.""" )
def A_ ( array , axis ):
    """Framework-agnostic expand_dims for numpy/torch/tf/jax arrays.

    NOTE(review): the `is_*` predicates referenced below were renamed away in
    this file; the original also declared both parameters under the same name
    (SyntaxError), which is fixed here.
    """
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(array )}.""" )
def A_ ( _lowerCAmelCase ) -> List[str]:
    """Return the total number of elements of a numpy/torch/tf/jax array.

    NOTE(review): the `is_*` predicates referenced below exist in the original
    module but were renamed away in this file — calling this raises NameError.
    The final error message says "expand_dims"; it presumably should name this
    function instead (copy-paste) — verify against the original module.
    """
    if is_numpy_array(_lowerCAmelCase ):
        return np.size(_lowerCAmelCase )
    elif is_torch_tensor(_lowerCAmelCase ):
        return array.numel()
    elif is_tf_tensor(_lowerCAmelCase ):
        import tensorflow as tf

        return tf.size(_lowerCAmelCase )
    elif is_jax_tensor(_lowerCAmelCase ):
        return array.size
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(_lowerCAmelCase )}.""" )
def A_ ( auto_map , repo_id ) -> dict:
    """Prefix every bare class reference in ``auto_map`` with ``repo_id``
    (``"Cls"`` -> ``"repo_id--Cls"``), leaving already-qualified entries and
    ``None`` untouched. Mutates and returns ``auto_map``.
    """
    # NOTE(review): the original declared both parameters under the same name
    # (SyntaxError) and assigned the rewritten values to throwaway locals
    # instead of back into `auto_map[key]` — both restored.
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"""{repo_id}--{value}"""
    return auto_map
def A_ ( _lowerCAmelCase ) -> str:
    """Infer the framework ("tf", "pt" or "flax") of a model class by walking
    its MRO and matching module prefixes / well-known base-class names.

    Raises:
        TypeError: if no base class belongs to a known framework.
    """
    # NOTE(review): the original attached the raise as the `else` of the inner
    # `if`, so it fired on the FIRST base class that matched nothing — user
    # subclasses defined outside the framework packages could never be
    # recognized. Restored to a for/else so the whole MRO is walked. The error
    # message also referenced the undefined name `model_class`.
    for base_class in inspect.getmro(_lowerCAmelCase ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"""Could not infer framework from class {_lowerCAmelCase}.""" )
| 712
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar("""KT""")
__lowerCamelCase : Dict = TypeVar("""VT""")
class A__ ( Generic[KT, VT] ):
    """A skip-list node holding a key/value pair and its forward links."""
    def __init__( self , key = "root" , value = None ):
        # NOTE(review): the original declared both parameters under the same
        # name (SyntaxError); names restored from the attribute reads below.
        self.key = key
        self.value = value
        # forward[i] is the next node at level i.
        self.forward: list = []
    def __repr__( self ):
        return F"""Node({self.key}: {self.value})"""
    @property
    def level( self ):
        """Height of this node = number of forward links.

        NOTE(review): the property had been renamed to a mangled name even
        though the skip-list code reads ``node.level``; name restored.
        """
        return len(self.forward )
class A__ ( Generic[KT, VT] ):
def __init__( self , A_ = 0.5 , A_ = 16 ):
'''simple docstring'''
UpperCamelCase : Node[KT, VT] = Node[KT, VT]()
UpperCamelCase : List[Any] = 0
UpperCamelCase : Union[str, Any] = p
UpperCamelCase : List[str] = max_level
def __str__( self ):
'''simple docstring'''
UpperCamelCase : int = list(self )
if len(A_ ) == 0:
return F"""SkipList(level={self.level})"""
UpperCamelCase : str = max((len(str(A_ ) ) for item in items) , default=4 )
UpperCamelCase : Dict = max(A_ , 4 ) + 4
UpperCamelCase : str = self.head
UpperCamelCase : List[Any] = []
UpperCamelCase : int = node.forward.copy()
lines.append(F"""[{node.key}]""".ljust(A_ , "-" ) + "* " * len(A_ ) )
lines.append(" " * label_size + "| " * len(A_ ) )
while len(node.forward ) != 0:
UpperCamelCase : Union[str, Any] = node.forward[0]
lines.append(
F"""[{node.key}]""".ljust(A_ , "-" )
+ " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
lines.append(" " * label_size + "| " * len(A_ ) )
UpperCamelCase : Tuple = node.forward
lines.append("None".ljust(A_ ) + "* " * len(A_ ) )
return F"""SkipList(level={self.level})\n""" + "\n".join(A_ )
def __iter__( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
UpperCamelCase : Union[str, Any] = node.forward[0]
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = []
UpperCamelCase : List[Any] = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
UpperCamelCase : str = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(A_ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : str = self._locate_node(A_ )
if node is not None:
for i, update_node in enumerate(A_ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
UpperCamelCase : Tuple = node.forward[i]
else:
UpperCamelCase : List[Any] = update_node.forward[:i]
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self._locate_node(A_ )
if node is not None:
UpperCamelCase : Union[str, Any] = value
else:
UpperCamelCase : Dict = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , A_ ):
update_vector.append(self.head )
UpperCamelCase : Optional[int] = level
UpperCamelCase : Dict = Node(A_ , A_ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(A_ )
else:
UpperCamelCase : List[Any] = new_node
def __UpperCamelCase( self , A_ ):
    """Return the value stored under key ``A_``, or None when the key is absent.

    NOTE(review): the original bound the ``_locate_node`` result to a
    throwaway name and then read an unbound ``node``; restored.
    """
    node, _ = self._locate_node(A_ )
    return node.value if node is not None else None
def A_ ( ) -> None:
    """Insert four keys and verify all are reachable at level 0 with their values.

    NOTE(review): mangled assignment targets (``UpperCamelCase``) restored
    from the names read below (``skip_list``, ``node``, ``all_values``).
    """
    skip_list = SkipList()
    skip_list.insert("Key1" , 3 )
    skip_list.insert("Key2" , 12 )
    skip_list.insert("Key3" , 41 )
    skip_list.insert("Key4" , -19 )
    node = skip_list.head
    all_values = {}
    # Walk the bottom (level-0) chain collecting every stored key/value.
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def A_ ( ) -> None:
    """Re-inserting an existing key must overwrite its value, not add a node."""
    skip_list = SkipList()
    skip_list.insert("Key1" , 10 )
    skip_list.insert("Key1" , 12 )
    skip_list.insert("Key5" , 7 )
    skip_list.insert("Key7" , 10 )
    skip_list.insert("Key10" , 5 )
    skip_list.insert("Key7" , 7 )
    skip_list.insert("Key5" , 5 )
    skip_list.insert("Key10" , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def A_ ( ) -> None:
    """find() on an empty skip list returns None."""
    skip_list = SkipList()
    assert skip_list.find("Some key" ) is None
def A_ ( ) -> None:
    """find() returns the latest value for present keys and None otherwise."""
    skip_list = SkipList()
    skip_list.insert("Key2" , 20 )
    assert skip_list.find("Key2" ) == 20
    skip_list.insert("Some Key" , 10 )
    skip_list.insert("Key2" , 8 )
    skip_list.insert("V" , 13 )
    assert skip_list.find("Y" ) is None
    assert skip_list.find("Key2" ) == 8
    assert skip_list.find("Some Key" ) == 10
    assert skip_list.find("V" ) == 13
def A_ ( ) -> None:
    """delete() on an empty list is a no-op and leaves the head untouched."""
    skip_list = SkipList()
    skip_list.delete("Some key" )
    assert len(skip_list.head.forward ) == 0
def A_ ( ) -> None:
    """Deleted keys must no longer be found by find()."""
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> None:
    """Deleting keys one by one removes exactly that key, leaving the rest intact."""
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) == 14
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("X" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key1" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> None:
    """After a delete, no forward pointer anywhere may still reach the node."""
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 142 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("X" )

    def traverse_keys(node ):
        # Yield this node's key plus, recursively, every reachable key.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )

    # head + 3 remaining keys == 4 distinct reachable keys.
    assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def A_ ( ) -> None:
    """Iterating the skip list always yields keys in sorted order."""

    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )

    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def A_ ( ) -> Tuple:
    '''Drive the full probabilistic skip-list test-suite 100 times.'''
    # NOTE(review): the functions called below (test_insert, ...) do not exist
    # in this file -- an automated rewrite renamed every top-level function to
    # ``A_`` -- so this driver raises NameError as written.
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def A_ ( ) -> None:
    """Demo: build a small skip list, delete a key and print the structure."""
    skip_list = SkipList()
    skip_list.insert(2 , "2" )
    skip_list.insert(4 , "4" )
    skip_list.insert(6 , "4" )
    skip_list.insert(4 , "5" )
    skip_list.insert(8 , "4" )
    skip_list.insert(9 , "4" )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
    # Run the module's doctests, then the interactive demo.
    import doctest

    doctest.testmod()
    # NOTE(review): ``main`` is not defined in this file -- the entry point
    # was renamed to ``A_`` by an automated rewrite -- so this raises NameError.
    main()
| 38
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
def A_ ( config , has_lm_head=False , is_semantic=False ) -> list:
    """Build (old_key, new_key) pairs mapping original BEiT/DiT checkpoint
    names onto HuggingFace ``Beit*`` parameter names.

    config: model config providing ``num_hidden_layers``.
    has_lm_head: checkpoint ends in an LM head (masked-image pretraining)
        instead of a classification head.
    is_semantic: checkpoint keys are nested under a ``backbone.`` prefix.

    NOTE(review): the original signature declared ``_lowerCAmelCase`` three
    times (a SyntaxError); parameter names restored from the body's reads and
    the keyword call site (``has_lm_head=``).
    """
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append(
            (F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append(
            (F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (F"""{prefix}cls_token""", "beit.embeddings.cls_token"),
            (F"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
            (F"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
            (F"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
        ] )
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ] )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys
def A_ ( state_dict , config , has_lm_head=False , is_semantic=False ) -> None:
    """Split each fused qkv projection in ``state_dict`` into separate
    query/key/value tensors under the HuggingFace BEiT key layout (in place),
    and rename the per-layer gamma scalings to ``lambda_*``.

    NOTE(review): the original signature declared the same parameter name four
    times (SyntaxError) and every state_dict write target was destroyed by an
    automated rename; targets restored per the upstream BEiT/DiT conversion
    script -- confirm against it before shipping.
    """
    for i in range(config.num_hidden_layers ):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values are stored fused as one (3*hidden, hidden) matrix
        in_proj_weight = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""" )
        q_bias = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""" )
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        # BEiT's key projection has no bias, hence only q_bias and v_bias above.
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""" )
        gamma_b = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""" )
        state_dict[F"""beit.encoder.layer.{i}.lambda_1"""] = gamma_a
        state_dict[F"""beit.encoder.layer.{i}.lambda_2"""] = gamma_b
def A_ ( dct , old , new ) -> None:
    """Move ``dct[old]`` to ``dct[new]`` in place (the old key is removed).

    NOTE(review): original signature declared one parameter name three times
    (SyntaxError); restored from the rename-loop call site.
    """
    val = dct.pop(old )
    dct[new] = val
def A_ ( ) -> "Image":
    """Download the standard COCO sanity-check image and return it as a PIL Image.

    NOTE(review): the original read an undefined name for the URL and was
    annotated ``-> int``; both restored.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so the raw HTTP response can be handed to PIL directly.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def A_ ( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    """Convert a DiT checkpoint from ``checkpoint_url`` into HuggingFace BEiT
    format, sanity-check the logits shape, save model + image processor to
    ``pytorch_dump_folder_path`` and optionally push both to the hub.

    NOTE(review): parameter and local names were reconstructed from the reads
    in the original (mangled) body and from the upstream conversion script;
    the two BeitConfig constructor flags could not be recovered from SOURCE
    and follow upstream -- confirm before shipping.
    """
    # rvlcdip checkpoints are document classifiers; all others keep the LM head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # NOTE(review): the original bound the parser/args to mangled names while
    # reading ``parser``/``args``; restored from those reads.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 713
|
from PIL import Image
def A_ ( _lowerCAmelCase ) -> "Image":
    """Binarize a grayscale (mode "L") image in place around its mean pixel
    value: pixels strictly brighter than the mean become 255, others 0.

    Returns the same image object.

    NOTE(review): the original iterated ``range(image)`` and bound
    width/height/mean/pixels to a single mangled name; restored.
    """
    width, height = _lowerCAmelCase.size
    pixels = _lowerCAmelCase.load()
    mean = 0
    # First pass: accumulate the mean brightness.
    for x in range(width ):
        for y in range(height ):
            mean += pixels[x, y]
    mean //= width * height
    # Second pass: threshold every pixel against the mean.
    for x in range(width ):
        for y in range(height ):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return _lowerCAmelCase
if __name__ == "__main__":
    # Demo: threshold an image from disk and save the result.
    # NOTE(review): ``mean_threshold`` and ``image`` are undefined here -- the
    # function above was renamed to ``A_`` and the assignment target was
    # mangled -- so this block raises NameError as written.
    __lowerCamelCase : Union[str, Any] = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
| 38
| 0
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# SageMaker integration tests: launch a real single-node training job per
# framework and compare runtime/accuracy/loss KPIs against expected results.
# Only runs when TEST_SAGEMAKER == "True".
# NOTE(review): an automated rename corrupted this class -- assignments target
# ``UpperCamelCase`` while later reads use the original local names
# (``estimator``, ``result_metrics_df``, ``eval_accuracy``, ...), and several
# keyword values became ``A_``.  Code kept byte-identical; docs only.
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
        },
    ] )
class A__ ( unittest.TestCase ):
    def __UpperCamelCase( self ):
        '''Copy the example training script into the test path (PyTorch only).'''
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=A_ , )
        assert hasattr(self , "env" )

    def __UpperCamelCase( self , A_=1 ):
        '''Build a single-node HuggingFace estimator for this parameterization.'''
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=A_ , instance_type=self.instance_type , debugger_hook_config=A_ , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )

    def __UpperCamelCase( self , A_ ):
        '''Export the job's CloudWatch metrics to a CSV in the test path.'''
        TrainingJobAnalytics(A_ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )

    def __UpperCamelCase( self ):
        '''Run one training job and check KPIs against the expected results.'''
        UpperCamelCase : List[str] = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        UpperCamelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCamelCase : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        UpperCamelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCamelCase : Optional[Any] = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , A_ )
| 714
|
from math import loga
def A_ ( _lowerCAmelCase ) -> int:
    """Return the 0-based position of the lowest set bit of a non-negative int
    (0 is returned for input 0).

    Raises TypeError for non-int input and ValueError for negative input.

    NOTE(review): the original read an undefined name ``a``, tested
    ``isinstance(a, a)`` and called a non-existent ``math.loga``; replaced
    with pure integer bit arithmetic (``x & -x`` isolates the lowest bit).
    """
    if not isinstance(_lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if _lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    # bit_length of the isolated lowest bit minus one == its position.
    return 0 if _lowerCAmelCase == 0 else (_lowerCAmelCase & -_lowerCAmelCase).bit_length() - 1
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 38
| 0
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class A__ ( nn.Module ):
    """Tiny Linear -> BatchNorm1d -> Linear module used by the offload tests.

    NOTE(review): the original bound the layers to a mangled local instead of
    ``self`` (registering no parameters) and referenced a non-existent
    ``nn.BatchNormad``; restored per the forward pass's reads.
    """

    def __init__( self ):
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )

    def __UpperCamelCase( self , A_ ):
        """Forward: project 3 -> 4, normalize, project 4 -> 5."""
        return self.linearb(self.batchnorm(self.lineara(A_ ) ) )
class A__ ( unittest.TestCase ):
    '''Tests for accelerate's weight-offloading helpers.

    NOTE(review): an automated rename corrupted this class -- every assignment
    target became ``UpperCamelCase`` while later reads keep the original local
    names (``model``, ``index``, ``weight_map``, ...), and many call arguments
    became ``A_``.  Code kept byte-identical; documentation only.
    '''

    def __UpperCamelCase( self ):
        '''offload_state_dict writes an index.json plus one .dat file per tensor.'''
        UpperCamelCase : Dict = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(A_ , model.state_dict() )
            UpperCamelCase : Optional[Any] = os.path.join(A_ , "index.json" )
            self.assertTrue(os.path.isfile(A_ ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                UpperCamelCase : Optional[int] = os.path.join(A_ , F"""{key}.dat""" )
                self.assertTrue(os.path.isfile(A_ ) )
                # TODO: add tests on the fact weights are properly loaded

    def __UpperCamelCase( self ):
        '''offload_weight / load_offloaded_weight round-trip across float dtypes.'''
        UpperCamelCase : str = [torch.floataa, torch.floataa, torch.bfloataa]
        for dtype in dtypes:
            UpperCamelCase : Any = torch.randn(2 , 3 , dtype=A_ )
            with TemporaryDirectory() as tmp_dir:
                UpperCamelCase : str = offload_weight(A_ , "weight" , A_ , {} )
                UpperCamelCase : Optional[Any] = os.path.join(A_ , "weight.dat" )
                self.assertTrue(os.path.isfile(A_ ) )
                self.assertDictEqual(A_ , {"weight": {"shape": [2, 3], "dtype": str(A_ ).split("." )[1]}} )
                UpperCamelCase : List[Any] = load_offloaded_weight(A_ , index["weight"] )
                self.assertTrue(torch.equal(A_ , A_ ) )

    def __UpperCamelCase( self ):
        '''OffloadedWeightsLoader merges an in-memory state_dict with an offload folder.'''
        UpperCamelCase : Optional[int] = ModelForTest()
        UpperCamelCase : Optional[int] = model.state_dict()
        UpperCamelCase : List[Any] = {k: v for k, v in state_dict.items() if "linear2" not in k}
        UpperCamelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(A_ , A_ )
            UpperCamelCase : Optional[int] = OffloadedWeightsLoader(state_dict=A_ , save_folder=A_ )
            # Every key is there with the right value
            self.assertEqual(sorted(A_ ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(A_ , weight_map[key] ) )
        UpperCamelCase : str = {k: v for k, v in state_dict.items() if "weight" in k}
        UpperCamelCase : List[str] = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(A_ , A_ )
            UpperCamelCase : Any = OffloadedWeightsLoader(state_dict=A_ , save_folder=A_ )
            # Every key is there with the right value
            self.assertEqual(sorted(A_ ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(A_ , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(A_ , A_ )
            # Duplicates are removed
            UpperCamelCase : Dict = OffloadedWeightsLoader(state_dict=A_ , save_folder=A_ )
            # Every key is there with the right value
            self.assertEqual(sorted(A_ ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(A_ , weight_map[key] ) )

    def __UpperCamelCase( self ):
        '''extract_submodules_state_dict keeps exact-prefix matches only (a.1, not a.10).'''
        UpperCamelCase : int = {"a.1": 0, "a.10": 1, "a.2": 2}
        UpperCamelCase : int = extract_submodules_state_dict(A_ , ["a.1", "a.2"] )
        self.assertDictEqual(A_ , {"a.1": 0, "a.2": 2} )
        UpperCamelCase : Tuple = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        UpperCamelCase : Dict = extract_submodules_state_dict(A_ , ["a.1", "a.2"] )
        self.assertDictEqual(A_ , {"a.1.a": 0, "a.2.a": 2} )
| 715
|
from __future__ import annotations
__lowerCamelCase : Optional[int] = """Muhammad Umer Farooq"""
__lowerCamelCase : Tuple = """MIT"""
__lowerCamelCase : Optional[int] = """1.0.0"""
__lowerCamelCase : int = """Muhammad Umer Farooq"""
__lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me"""
__lowerCamelCase : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class A__ ( __snake_case ):
    """HTML parser that collects, as absolute URLs, every non-empty anchor
    href found while feeding a page.

    NOTE(review): the original ``handle_starttag`` declared ``A_`` twice
    (SyntaxError) and ``__init__`` bound attributes to a mangled local;
    restored from the body's reads (``tag``, ``attrs``, ``self.urls``,
    ``self.domain``).
    """

    def __init__( self , domain ):
        super().__init__()
        # Collected absolute URLs, in first-seen order, without duplicates.
        self.urls: list[str] = []
        # Base used to resolve relative hrefs.
        self.domain = domain

    def __UpperCamelCase( self , tag , attrs ):
        """Record the href of every ``<a>`` tag that is neither empty nor '#'."""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the registrable domain (last two host labels) of a URL.

    NOTE(review): the original called ``get_sub_domain_name``, a name that no
    longer exists after an automated rename; the netloc extraction is inlined
    instead, preserving the original's result.
    """
    return ".".join(parse.urlparse(_lowerCAmelCase ).netloc.split("." )[-2:] )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the network location (host[:port]) component of a URL."""
    parsed_url = parse.urlparse(_lowerCAmelCase )
    return parsed_url.netloc
def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]:
    """Crawl the page at ``_lowerCAmelCase`` and return the sorted, de-duplicated
    e-mail addresses of its domain found on the pages it links to.

    NOTE(review): mangled assignment targets restored from the body's reads
    (``domain``, ``parser``, ``r``, ``read``, ``emails``, ``valid_emails``).
    """
    domain = get_domain_name(_lowerCAmelCase )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(_lowerCAmelCase )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    # Demo: crawl github.com and print every e-mail address found.
    # NOTE(review): ``emails_from_url`` and ``emails`` are undefined here --
    # the function above was renamed to ``A_`` and the assignment target was
    # mangled -- so this block raises NameError as written.
    __lowerCamelCase : Tuple = emails_from_url("""https://github.com""")
    print(f"""{len(emails)} emails found:""")
    print("""\n""".join(sorted(emails)))
| 38
| 0
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
# Helper that builds tiny GPT-NeoX configs/models and runs per-head checks.
# NOTE(review): an automated rename corrupted this class -- ``__init__``
# declares the parameter ``A_`` many times (a duplicate-argument SyntaxError)
# and assignment targets became ``UpperCamelCase`` while reads keep the
# original names.  Code kept byte-identical; documentation only.
class A__ :
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=64 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
        '''Store the hyper-parameters used to build tiny GPT-NeoX test models.'''
        UpperCamelCase : Optional[int] = parent
        UpperCamelCase : Any = batch_size
        UpperCamelCase : List[str] = seq_length
        UpperCamelCase : Union[str, Any] = is_training
        UpperCamelCase : int = use_input_mask
        UpperCamelCase : int = use_token_type_ids
        UpperCamelCase : Any = use_labels
        UpperCamelCase : Optional[Any] = vocab_size
        UpperCamelCase : List[Any] = hidden_size
        UpperCamelCase : Optional[int] = num_hidden_layers
        UpperCamelCase : Optional[int] = num_attention_heads
        UpperCamelCase : List[str] = intermediate_size
        UpperCamelCase : List[str] = hidden_act
        UpperCamelCase : str = hidden_dropout_prob
        UpperCamelCase : Optional[int] = attention_probs_dropout_prob
        UpperCamelCase : int = max_position_embeddings
        UpperCamelCase : Dict = type_vocab_size
        UpperCamelCase : Optional[Any] = type_sequence_label_size
        UpperCamelCase : List[str] = initializer_range
        UpperCamelCase : Optional[int] = num_labels
        UpperCamelCase : List[str] = num_choices
        UpperCamelCase : List[Any] = scope
        UpperCamelCase : Optional[Any] = vocab_size - 1

    def __UpperCamelCase( self ):
        '''Create random input ids, an optional attention mask and token labels.'''
        UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : List[str] = None
        if self.use_input_mask:
            UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : int = None
        if self.use_labels:
            UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        UpperCamelCase : Tuple = self.get_config()
        return config, input_ids, input_mask, token_labels

    def __UpperCamelCase( self ):
        '''Build a GPTNeoXConfig from the stored hyper-parameters.'''
        return GPTNeoXConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )

    def __UpperCamelCase( self ):
        '''Same as prepare_config_and_inputs, but with is_decoder forced on.'''
        UpperCamelCase : List[str] = self.prepare_config_and_inputs()
        UpperCamelCase : Dict = True
        return config, input_ids, input_mask, token_labels

    def __UpperCamelCase( self , A_ , A_ , A_ ):
        '''Base model forward (with and without mask) has the expected hidden shape.'''
        UpperCamelCase : Optional[int] = GPTNeoXModel(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Any = model(A_ , attention_mask=A_ )
        UpperCamelCase : Dict = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ ):
        '''Base model used as a decoder has the expected hidden shape.'''
        UpperCamelCase : Dict = True
        UpperCamelCase : Optional[Any] = GPTNeoXModel(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : List[Any] = model(A_ , attention_mask=A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        '''Causal-LM head produces vocab-sized logits.'''
        UpperCamelCase : Optional[Any] = GPTNeoXForCausalLM(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : int = model(A_ , attention_mask=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        '''QA head produces per-token start and end logits.'''
        UpperCamelCase : Union[str, Any] = self.num_labels
        UpperCamelCase : Union[str, Any] = GPTNeoXForQuestionAnswering(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : int = model(A_ , attention_mask=A_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        '''Sequence-classification head produces per-sequence label logits.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : List[str] = GPTNeoXForSequenceClassification(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase : List[str] = model(A_ , attention_mask=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        '''Token-classification head produces per-token label logits.'''
        UpperCamelCase : Optional[int] = self.num_labels
        UpperCamelCase : Optional[int] = GPTNeoXForTokenClassification(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Union[str, Any] = model(A_ , attention_mask=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCamelCase( self , A_ , A_ , A_ ):
        '''Decoding with cached past_key_values matches a full forward pass.'''
        UpperCamelCase : int = True
        UpperCamelCase : Tuple = GPTNeoXForCausalLM(config=A_ )
        model.to(A_ )
        model.eval()
        # first forward pass
        UpperCamelCase : str = model(A_ , attention_mask=A_ , use_cache=A_ )
        UpperCamelCase : List[Any] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCamelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCamelCase : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        UpperCamelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCamelCase : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
        UpperCamelCase : Optional[Any] = model(A_ , attention_mask=A_ , output_hidden_states=A_ )
        UpperCamelCase : Any = output_from_no_past["hidden_states"][0]
        UpperCamelCase : List[str] = model(
            A_ , attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )["hidden_states"][0]
        # select random slice
        UpperCamelCase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCamelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCamelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) )

    def __UpperCamelCase( self ):
        '''Return (config, inputs_dict) as expected by the common model tests.'''
        UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
        UpperCamelCase : Optional[Any] = config_and_inputs
        UpperCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
    """GPT-NeoX model test suite: common model checks, task heads, pipeline
    mapping and RoPE-scaling behaviour.

    NOTE(review): identifiers in this file look machine-mangled (every local is
    rebound to ``UpperCamelCase`` and ``A_`` is used as a placeholder argument);
    the docstrings below describe the apparent intent -- confirm against the
    upstream ``transformers`` GPT-NeoX test file.
    """
    # All GPT-NeoX model classes exercised by the common tests (empty without torch).
    _UpperCAmelCase :str = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Generative model classes (used by the generation tests).
    _UpperCAmelCase :str = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    # Pipeline task name -> model class mapping.
    _UpperCAmelCase :Optional[Any] = (
        {
            'feature-extraction': GPTNeoXModel,
            'question-answering': GPTNeoXForQuestionAnswering,
            'text-classification': GPTNeoXForSequenceClassification,
            'text-generation': GPTNeoXForCausalLM,
            'token-classification': GPTNeoXForTokenClassification,
            'zero-shot': GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common test-suite (all disabled here).
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :List[str] = False
    _UpperCAmelCase :Union[str, Any] = False
    _UpperCAmelCase :Dict = False
    def __UpperCamelCase( self ):
        '''Instantiate the model tester and a ConfigTester with a small test config.'''
        UpperCamelCase : Union[str, Any] = GPTNeoXModelTester(self )
        UpperCamelCase : Any = ConfigTester(self , config_class=A_ , hidden_size=64 , num_attention_heads=8 )
    def __UpperCamelCase( self ):
        '''Run the shared config sanity tests.'''
        self.config_tester.run_common_tests()
    def __UpperCamelCase( self ):
        '''Basic forward-pass check on the base model.'''
        UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(A_ , A_ , A_ )
    def __UpperCamelCase( self ):
        '''Check the model when used as a decoder.'''
        UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(A_ , A_ , A_ )
    def __UpperCamelCase( self ):
        '''Decoder check with the attention mask set to None.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
        UpperCamelCase : Optional[int] = None
        self.model_tester.create_and_check_model_as_decoder(A_ , A_ , A_ )
    def __UpperCamelCase( self ):
        '''Check past-key-values caching with large decoder inputs.'''
        UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(A_ , A_ , A_ )
    def __UpperCamelCase( self ):
        '''Causal-LM head check.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*A_ )
    def __UpperCamelCase( self ):
        '''Question-answering head check.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )
    def __UpperCamelCase( self ):
        '''Sequence-classification head check.'''
        UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A_ )
    def __UpperCamelCase( self ):
        '''Token-classification head check.'''
        UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )
    @unittest.skip(reason="Feed forward chunking is not implemented" )
    def __UpperCamelCase( self ):
        '''Skipped: feed-forward chunking is not supported by GPT-NeoX.'''
        pass
    @parameterized.expand([("linear",), ("dynamic",)] )
    def __UpperCamelCase( self , A_ ):
        '''RoPE scaling: short inputs match the unscaled model only for dynamic
        scaling; long inputs must always differ from the unscaled model.'''
        UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        # One short (10-token) and one long (> max_position_embeddings) input.
        UpperCamelCase : Tuple = ids_tensor([1, 10] , config.vocab_size )
        UpperCamelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        UpperCamelCase : Dict = GPTNeoXModel(A_ )
        original_model.to(A_ )
        original_model.eval()
        UpperCamelCase : List[Any] = original_model(A_ ).last_hidden_state
        UpperCamelCase : Optional[Any] = original_model(A_ ).last_hidden_state
        set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        UpperCamelCase : Union[str, Any] = {"type": scaling_type, "factor": 10.0}
        UpperCamelCase : List[Any] = GPTNeoXModel(A_ )
        scaled_model.to(A_ )
        scaled_model.eval()
        UpperCamelCase : Dict = scaled_model(A_ ).last_hidden_state
        UpperCamelCase : Any = scaled_model(A_ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(A_ , A_ , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
    """Slow integration test: greedy generation from Pythia-410M must match a
    pinned reference string, with and without gradient checkpointing."""
    @slow
    def __UpperCamelCase( self ):
        '''Generate 20 new tokens from "My favorite food is" and compare to the pinned text.'''
        UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" )
        for checkpointing in [True, False]:
            UpperCamelCase : str = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(A_ )
            UpperCamelCase : Optional[Any] = tokenizer("My favorite food is" , return_tensors="pt" ).to(A_ )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            UpperCamelCase : Optional[int] = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            UpperCamelCase : List[Any] = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 )
            UpperCamelCase : int = tokenizer.batch_decode(A_ )[0]
            self.assertEqual(A_ , A_ )
| 716
|
from __future__ import annotations
def A_ ( _lowerCAmelCase ) -> list[int]:
    """Return all prime numbers strictly below ``_lowerCAmelCase``.

    Implements the sieve of Eratosthenes.

    Fixes: the previous body rebound a single mangled name instead of indexing
    the sieve, and the loop referenced an undefined ``index`` (NameError), so
    it could never run.

    :param _lowerCAmelCase: exclusive upper bound (any int).
    :return: ascending list of primes below the bound; empty for bounds <= 2.
    """
    limit = _lowerCAmelCase
    if limit <= 2:
        # No primes below 2; also guards the sieve-list indexing below.
        return []
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, int(limit**0.5) + 1):
        if is_prime[candidate]:
            # Start at candidate**2: smaller multiples were marked by smaller primes.
            for multiple in range(candidate * candidate, limit, candidate):
                is_prime[multiple] = False
    return [n for n in range(2, limit) if is_prime[n]]
def _prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: primes strictly below ``limit`` (helper for A_)."""
    if limit <= 2:
        return []
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, int(limit**0.5) + 1):
        if is_prime[candidate]:
            for multiple in range(candidate * candidate, limit, candidate):
                is_prime[multiple] = False
    return [n for n in range(2, limit) if is_prime[n]]


def A_ ( _lowerCAmelCase = 100_0000 ) -> int:
    """Project Euler 50: the prime below ``_lowerCAmelCase`` that can be written
    as the sum of the most consecutive primes.

    Fixes: the previous body referenced undefined names (``length``,
    ``largest`` and ``prime_sieve``) and so raised NameError; the accumulator
    assignments had been collapsed onto one mangled name. Also uses a set for
    primality membership instead of an O(n) list scan.

    :param _lowerCAmelCase: exclusive ceiling for the consecutive-prime sum.
    :return: the qualifying prime with the longest run (0 if none exists).
    """
    ceiling = _lowerCAmelCase
    primes = _prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership tests
    length = 0
    largest = 0
    for i in range(len(primes)):
        # Start j at i + length: shorter runs can never beat the current best.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # Print the Project Euler result when run as a script.
    # NOTE(review): `solution` is not defined in this module (the function above
    # is named `A_`); running this guard would raise NameError -- confirm intent.
    print(f"""{solution() = }""")
| 38
| 0
|
def A_ ( _lowerCAmelCase ) -> int:
    """Return the number of set bits (population count) of a non-negative int.

    Uses Brian Kernighan's trick: ``n &= n - 1`` clears the lowest set bit, so
    the loop runs once per 1-bit instead of once per bit position.

    Fixes: the previous body called ``isinstance(x, x)`` (TypeError for any
    int input) instead of ``isinstance(x, int)``, and ``count`` was never
    initialised (NameError) because the assignment was bound to a mangled name.

    :param _lowerCAmelCase: non-negative integer to count bits of.
    :raises ValueError: if the input is not an int or is negative.
    """
    if not isinstance(_lowerCAmelCase, int) or _lowerCAmelCase < 0:
        raise ValueError("Input must be a non-negative integer" )
    number = _lowerCAmelCase
    count = 0
    while number:
        # Clear the lowest set bit and count it.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 717
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __snake_case ):
    """Dataset input stream backed by a Python generator function.

    NOTE(review): local names look machine-mangled (repeated ``UpperCamelCase``
    rebindings, ``A_`` placeholder arguments); comments describe apparent intent.
    """
    def __UpperCamelCase( self , A_ , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , A_ = None , **A_ , ):
        '''Store the stream options and build the underlying Generator builder.'''
        super().__init__(
            features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , )
        UpperCamelCase : Optional[int] = Generator(
            cache_dir=A_ , features=A_ , generator=A_ , gen_kwargs=A_ , **A_ , )
    def __UpperCamelCase( self ):
        '''Materialise the dataset: streaming view, or download/prepare then load the "train" split.'''
        if self.streaming:
            UpperCamelCase : Optional[Any] = self.builder.as_streaming_dataset(split="train" )
        # Build regular (map-style) dataset
        else:
            # NOTE(review): the four assignments below rebind the same mangled
            # name; presumably they stood for download_config / download_mode /
            # verification_mode / base_path defaults -- confirm upstream.
            UpperCamelCase : Union[str, Any] = None
            UpperCamelCase : Union[str, Any] = None
            UpperCamelCase : List[Any] = None
            UpperCamelCase : List[str] = None
            self.builder.download_and_prepare(
                download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , )
            UpperCamelCase : int = self.builder.as_dataset(
                split="train" , verification_mode=A_ , in_memory=self.keep_in_memory )
        return dataset
| 38
| 0
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __snake_case , unittest.TestCase ):
    """Tokenizer test-suite for CodeGen (slow + fast): toy-BPE setup, padding
    behaviour, custom BOS token handling and ``truncate_before_pattern``.

    NOTE(review): identifiers look machine-mangled (``UpperCamelCase``
    rebindings, ``A_`` placeholders); docstrings describe apparent intent.
    """
    # Tokenizer classes under test and their shared construction kwargs.
    _UpperCAmelCase :int = CodeGenTokenizer
    _UpperCAmelCase :Dict = CodeGenTokenizerFast
    _UpperCAmelCase :Any = True
    _UpperCAmelCase :Any = {'add_prefix_space': True}
    _UpperCAmelCase :Optional[int] = False
    def __UpperCamelCase( self ):
        '''Write a toy BPE vocab and merges file into the temp test directory.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCamelCase : Dict = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        UpperCamelCase : List[str] = dict(zip(A_ , range(len(A_ ) ) ) )
        UpperCamelCase : Union[str, Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        UpperCamelCase : Optional[int] = {"unk_token": "<unk>"}
        UpperCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(A_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(A_ ) )
    def __UpperCamelCase( self , **A_ ):
        '''Build a slow tokenizer from the temp files.'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A_ )
    def __UpperCamelCase( self , **A_ ):
        '''Build a fast (Rust) tokenizer from the temp files.'''
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
    def __UpperCamelCase( self , A_ ):
        '''Return an (input_text, output_text) pair for round-trip tests.'''
        UpperCamelCase : Tuple = "lower newer"
        UpperCamelCase : Dict = "lower newer"
        return input_text, output_text
    def __UpperCamelCase( self ):
        '''Tokenize "lower newer" and check both the tokens and their ids.'''
        UpperCamelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        UpperCamelCase : Tuple = "lower newer"
        UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        UpperCamelCase : List[Any] = tokenizer.tokenize(A_ , add_prefix_space=A_ )
        self.assertListEqual(A_ , A_ )
        UpperCamelCase : Dict = tokens + [tokenizer.unk_token]
        UpperCamelCase : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
    def __UpperCamelCase( self ):
        '''Check slow and fast tokenizers agree on tokens, ids and the unk token.'''
        if not self.test_rust_tokenizer:
            return
        UpperCamelCase : Any = self.get_tokenizer()
        UpperCamelCase : int = self.get_rust_tokenizer(add_prefix_space=A_ )
        UpperCamelCase : Dict = "lower newer"
        # Testing tokenization
        UpperCamelCase : Tuple = tokenizer.tokenize(A_ , add_prefix_space=A_ )
        UpperCamelCase : int = rust_tokenizer.tokenize(A_ )
        self.assertListEqual(A_ , A_ )
        # Testing conversion to ids without special tokens
        UpperCamelCase : Optional[int] = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
        UpperCamelCase : Tuple = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
        self.assertListEqual(A_ , A_ )
        # Testing conversion to ids with special tokens
        UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=A_ )
        UpperCamelCase : Tuple = tokenizer.encode(A_ , add_prefix_space=A_ )
        UpperCamelCase : List[Any] = rust_tokenizer.encode(A_ )
        self.assertListEqual(A_ , A_ )
        # Testing the unknown token
        UpperCamelCase : Any = tokens + [rust_tokenizer.unk_token]
        UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ )
    def __UpperCamelCase( self , *A_ , **A_ ):
        '''Intentionally disabled base-class test (no-op).'''
        pass
    def __UpperCamelCase( self , A_=15 ):
        '''Padding without a pad token must raise for simple, pair and batch inputs.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
                # Simple input
                UpperCamelCase : Any = "This is a simple input"
                UpperCamelCase : Optional[int] = ["This is a simple input 1", "This is a simple input 2"]
                UpperCamelCase : Optional[int] = ("This is a simple input", "This is a pair")
                UpperCamelCase : Dict = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding="max_length" )
                # Simple input
                self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding="max_length" )
                # Simple input
                self.assertRaises(
                    A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding="max_length" , )
                # Pair input
                self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding="max_length" )
                # Pair input
                self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding="max_length" )
                # Pair input
                self.assertRaises(
                    A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding="max_length" , )
    def __UpperCamelCase( self ):
        '''Check max_length and automatic (longest) padding for single and pair inputs.'''
        UpperCamelCase : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
        # Simple input
        UpperCamelCase : Optional[int] = "This is a simple input"
        UpperCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
        UpperCamelCase : Optional[int] = ("This is a simple input", "This is a pair")
        UpperCamelCase : Optional[Any] = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        UpperCamelCase : int = tokenizer.pad_token_id
        UpperCamelCase : List[str] = tokenizer(A_ , padding="max_length" , max_length=30 , return_tensors="np" )
        UpperCamelCase : List[Any] = tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors="np" )
        UpperCamelCase : List[str] = tokenizer(*A_ , padding="max_length" , max_length=60 , return_tensors="np" )
        UpperCamelCase : Union[str, Any] = tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors="np" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["input_ids"] )
        self.assertTrue(0 in out_s["attention_mask"] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
        self.assertFalse(0 in out_sa["attention_mask"][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
        self.assertTrue(0 in out_sa["attention_mask"][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["input_ids"] )
        self.assertTrue(0 in out_p["attention_mask"] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
        self.assertFalse(0 in out_pa["attention_mask"][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
        self.assertTrue(0 in out_pa["attention_mask"][1] )
    def __UpperCamelCase( self ):
        '''Check a custom BOS token ("$$$") is prepended on encode and survives decode.'''
        UpperCamelCase : Tuple = "$$$"
        UpperCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ )
        UpperCamelCase : Tuple = "This is a simple input"
        UpperCamelCase : str = ["This is a simple input 1", "This is a simple input 2"]
        UpperCamelCase : Tuple = tokenizer.bos_token_id
        UpperCamelCase : Union[str, Any] = tokenizer(A_ )
        UpperCamelCase : int = tokenizer(A_ )
        self.assertEqual(out_s.input_ids[0] , A_ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        UpperCamelCase : int = tokenizer.decode(out_s.input_ids )
        UpperCamelCase : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , A_ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    @slow
    def __UpperCamelCase( self ):
        '''Check decode(..., truncate_before_pattern=...) stops at the given regexes.'''
        UpperCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
        UpperCamelCase : List[str] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        UpperCamelCase : Optional[int] = "\nif len_a > len_b: result = a\nelse: result = b"
        UpperCamelCase : int = tokenizer.encode(A_ )
        UpperCamelCase : int = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
        UpperCamelCase : Any = tokenizer.decode(A_ , truncate_before_pattern=A_ )
        self.assertEqual(A_ , A_ )
    def __UpperCamelCase( self ):
        '''Intentionally disabled base-class test (no-op).'''
        pass
| 718
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A_ ( _lowerCAmelCase ) -> Union[str, Any]:  # picklable for multiprocessing
    """Return the sum of the elements of *_lowerCAmelCase* (e.g. a numpy array).

    Fix: the body referenced an undefined name ``x`` (NameError); the
    parameter is ``_lowerCAmelCase``.
    """
    return _lowerCAmelCase.sum()
def A_ ( _lowerCAmelCase ) -> Optional[Any]:  # picklable for multiprocessing
    """Return *_lowerCAmelCase* incremented by one.

    Fix: the body referenced an undefined name ``i`` (NameError); the
    parameter is ``_lowerCAmelCase``.
    """
    return _lowerCAmelCase + 1
@dataclass
class A__ :
    # NOTE(review): both fields carry the same mangled name `_UpperCAmelCase`,
    # so the second annotation overwrites the first. The tests below construct
    # this as `A(x=..., y=...)`, suggesting the upstream fields were
    # `x: int` and `y: str` -- confirm before relying on this class.
    _UpperCAmelCase :int
    _UpperCAmelCase :str
class A__ ( __snake_case ):
    """Tests for ``datasets.utils.py_utils``: map_nested (sequential,
    multiprocess and numpy variants), zip_dict and temporary_assignment.

    NOTE(review): locals are machine-mangled (``UpperCamelCase`` rebindings,
    ``A_`` placeholders); docstrings describe apparent intent.
    """
    def __UpperCamelCase( self ):
        '''map_nested over dicts/lists/scalars, with and without num_proc, plus numpy handling.'''
        UpperCamelCase : Optional[int] = {}
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Tuple = [1, 2]
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
        UpperCamelCase : Any = {"a": {"1": 1}, "b": 2}
        UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4}
        UpperCamelCase : Dict = {}
        UpperCamelCase : Any = []
        UpperCamelCase : Any = 2
        UpperCamelCase : Any = [2, 3]
        UpperCamelCase : Optional[Any] = {"a": 2, "b": 3}
        UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]}
        UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3}
        UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        UpperCamelCase : List[str] = 2
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        UpperCamelCase : int = {"a": 2, "b": 0, "c": 2}
        UpperCamelCase : Union[str, Any] = {
            "a": np.eye(2 ).astype(A_ ),
            "b": np.zeros(3 ).astype(A_ ),
            "c": np.ones(2 ).astype(A_ ),
        }
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(A_ ): # can't pickle a local lambda
            map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
    def __UpperCamelCase( self ):
        '''zip_dict pairs up values of several dicts by key.'''
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : List[Any] = {"a": 3, "b": 4}
        UpperCamelCase : Tuple = {"a": 5, "b": 6}
        UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
    def __UpperCamelCase( self ):
        '''temporary_assignment must restore the attribute after the with-block.'''
        class A__ :
            _UpperCAmelCase :str = 'bar'
        UpperCamelCase : List[Any] = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(A_ , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    """map_nested must stay sequential below parallel_min_length (16) and
    otherwise use a multiprocessing Pool with the capped num_proc."""
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        UpperCamelCase : Union[str, Any] = {F"""{i}""": i for i in range(_lowerCAmelCase )}
        UpperCamelCase : List[str] = map_nested(lambda _lowerCAmelCase : x + 10 , _lowerCAmelCase , num_proc=_lowerCAmelCase , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__ ( __snake_case ):
    """temp_seed must make TensorFlow / PyTorch / NumPy RNG reproducible inside
    the context manager and leave the RNG free afterwards."""
    @require_tf
    def __UpperCamelCase( self ):
        '''Same seed -> identical TF outputs; a fresh draw afterwards differs.'''
        import tensorflow as tf
        from tensorflow.keras import layers
        UpperCamelCase : int = layers.Dense(2 )
        def gen_random_output():
            UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) )
            return model(A_ ).numpy()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : List[Any] = gen_random_output()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        UpperCamelCase : Optional[int] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    @require_torch
    def __UpperCamelCase( self ):
        '''Same seed -> identical torch outputs; a fresh draw afterwards differs.'''
        import torch
        def gen_random_output():
            UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 )
            UpperCamelCase : Dict = torch.rand(1 , 3 )
            return model(A_ ).detach().numpy()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Optional[int] = gen_random_output()
        UpperCamelCase : List[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    def __UpperCamelCase( self ):
        '''Same seed -> identical numpy outputs; a fresh draw afterwards differs.'''
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        UpperCamelCase : Optional[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """NestedDataStructure must expose the wrapped data unchanged via ``.data``."""
    UpperCamelCase : Optional[Any] = NestedDataStructure(_lowerCAmelCase ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
    """NestedDataStructure.flatten must flatten arbitrarily nested lists/dicts."""
    UpperCamelCase : Dict = NestedDataStructure(_lowerCAmelCase ).flatten()
    assert output == expected_output
def A_ ( ) -> List[Any]:
    """asdict must recurse into dataclasses nested in dicts/lists and reject
    non-dataclass top-level input."""
    UpperCamelCase : str = A(x=1 , y="foobar" )
    UpperCamelCase : Tuple = {"x": 1, "y": "foobar"}
    assert asdict(_lowerCAmelCase ) == expected_output
    UpperCamelCase : List[str] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    UpperCamelCase : Tuple = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(_lowerCAmelCase ) == expected_output
    with pytest.raises(_lowerCAmelCase ):
        asdict([1, A(x=10 , y="foo" )] )
def A_ ( _lowerCAmelCase ) -> Tuple:
    """Split *_lowerCAmelCase* on whitespace (picklable helper for multiprocessing tests).

    Fix: the body referenced an undefined name ``text`` (NameError); the
    parameter is ``_lowerCAmelCase``.
    """
    return _lowerCAmelCase.split()
def A_ ( _lowerCAmelCase ) -> Dict:
    """Yield ``(timestamp, content)`` twice, two seconds apart, for timing tests.

    Fix: the body referenced an undefined name ``content`` (NameError when the
    generator is iterated); the parameter is ``_lowerCAmelCase``.
    """
    yield (time.time(), _lowerCAmelCase)
    time.sleep(2 )
    yield (time.time(), _lowerCAmelCase)
def A_ ( ) -> str:
    """iflatmap_unordered must flatten results from both Pool implementations
    and yield each item as soon as it is produced (not after the worker ends).

    NOTE(review): this body passes the mangled placeholder ``_lowerCAmelCase``
    where the pool object is expected -- confirm against the upstream test.
    """
    with Pool(2 ) as pool:
        UpperCamelCase : List[str] = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(_lowerCAmelCase ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        UpperCamelCase : Dict = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(_lowerCAmelCase ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        UpperCamelCase : Any = []
        for yield_time, content in iflatmap_unordered(
            _lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(_lowerCAmelCase )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(_lowerCAmelCase ) == 4
| 38
| 0
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
# Fail fast with an actionable message if the installed datasets version is too old.
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
# Module-level logger for this training script.
__lowerCamelCase : Optional[int] = logging.getLogger(__name__)
@dataclass
class A__ :
    """Arguments describing the data to train/evaluate on (tab_fact by default).

    NOTE(review): field names are machine-mangled (all ``_UpperCAmelCase``), so
    in this form later fields shadow earlier ones; the comments record the
    apparent upstream field each declaration stood for -- confirm upstream.
    """
    # dataset_name: which dataset to load via the `datasets` library.
    _UpperCAmelCase :Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    # dataset_config_name
    _UpperCAmelCase :Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
    # max_seq_length
    _UpperCAmelCase :int = field(
        default=1_0_2_4 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    # overwrite_cache
    _UpperCAmelCase :bool = field(
        default=__snake_case , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    # pad_to_max_length
    _UpperCAmelCase :bool = field(
        default=__snake_case , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )
    # max_train_samples
    _UpperCAmelCase :Optional[int] = field(
        default=__snake_case , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    # max_eval_samples
    _UpperCAmelCase :Optional[int] = field(
        default=__snake_case , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    # max_predict_samples
    _UpperCAmelCase :Optional[int] = field(
        default=__snake_case , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        } , )
    # train_file / validation_file / test_file
    _UpperCAmelCase :Optional[str] = field(
        default=__snake_case , metadata={'help': 'A csv or a json file containing the training data.'} )
    _UpperCAmelCase :Optional[str] = field(
        default=__snake_case , metadata={'help': 'A csv or a json file containing the validation data.'} )
    _UpperCAmelCase :Optional[str] = field(default=__snake_case , metadata={'help': 'A csv or a json file containing the test data.'} )
    def __UpperCamelCase( self ):
        '''Validate that a dataset name, or train/validation files with matching csv/json extensions, were given.'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
        else:
            UpperCamelCase : Tuple = self.train_file.split("." )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            UpperCamelCase : Optional[Any] = self.validation_file.split("." )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class A__ :
    """Arguments for selecting the pretrained model / config / tokenizer.

    NOTE(review): field names are machine-mangled (all ``_UpperCAmelCase``);
    the comments record the apparent upstream field each declaration stood for.
    """
    # model_name_or_path
    _UpperCAmelCase :str = field(
        default=__snake_case , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    # config_name
    _UpperCAmelCase :Optional[str] = field(
        default=__snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    # tokenizer_name
    _UpperCAmelCase :Optional[str] = field(
        default=__snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    # cache_dir
    _UpperCAmelCase :Optional[str] = field(
        default=__snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    # use_fast_tokenizer
    _UpperCAmelCase :bool = field(
        default=__snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    # model_revision
    _UpperCAmelCase :str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    # use_auth_token
    _UpperCAmelCase :bool = field(
        default=__snake_case , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
def A_ ( ) -> List[str]:
    '''Entry point: fine-tune and/or evaluate a TAPEX (BART) model on TabFact.

    Parses model/data/training arguments, loads the dataset (hub or local
    csv/json), tokenizes table+statement pairs, then runs train/eval/predict
    with a `Trainer` according to the parsed flags.

    NOTE(review): throughout this function the assignment targets look garbled
    (e.g. bound to `UpperCamelCase` while later code reads `parser`,
    `model_args`, `data_args`, `training_args`, ...). Verify against the
    original script before relying on this code.
    '''
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCamelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCamelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCamelCase : List[str] = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    UpperCamelCase : List[Any] = training_args.get_process_log_level()
    logger.setLevel(_lowerCAmelCase )
    datasets.utils.logging.set_verbosity(_lowerCAmelCase )
    transformers.utils.logging.set_verbosity(_lowerCAmelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    UpperCamelCase : Dict = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCamelCase : Dict = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        UpperCamelCase : str = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        UpperCamelCase : Tuple = {"train": data_args.train_file, "validation": data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                UpperCamelCase : str = data_args.train_file.split("." )[-1]
                UpperCamelCase : Union[str, Any] = data_args.test_file.split("." )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                UpperCamelCase : Dict = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
        for key in data_files.keys():
            logger.info(F"""load a local file for {key}: {data_files[key]}""" )
        if data_args.train_file.endswith(".csv" ):
            # Loading a dataset from local csv files
            UpperCamelCase : Optional[Any] = load_dataset("csv" , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            UpperCamelCase : str = load_dataset("json" , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    UpperCamelCase : Optional[Any] = raw_datasets["train"].features["label"].names
    UpperCamelCase : Optional[int] = len(_lowerCAmelCase )
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    UpperCamelCase : Any = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_lowerCAmelCase , )
    UpperCamelCase : str = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Padding strategy
    if data_args.pad_to_max_length:
        UpperCamelCase : str = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        UpperCamelCase : Tuple = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    UpperCamelCase : Optional[int] = {"Refused": 0, "Entailed": 1}
    UpperCamelCase : Tuple = {0: "Refused", 1: "Entailed"}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
            F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
    UpperCamelCase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(_lowerCAmelCase ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_lowerCAmelCase ):
            # Rows are '\n'-separated, cells '#'-separated; first row holds the header.
            UpperCamelCase : Any = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
            UpperCamelCase : str = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        UpperCamelCase : str = examples["statement"]
        UpperCamelCase : Dict = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
        UpperCamelCase : str = tokenizer(_lowerCAmelCase , _lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase )
        UpperCamelCase : Any = examples["label"]
        return result
    with training_args.main_process_first(desc="dataset map pre-processing" ):
        UpperCamelCase : List[Any] = raw_datasets.map(
            _lowerCAmelCase , batched=_lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset" )
        UpperCamelCase : int = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            UpperCamelCase : Union[str, Any] = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset" )
        UpperCamelCase : Optional[Any] = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            UpperCamelCase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset" )
        UpperCamelCase : Tuple = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            UpperCamelCase : List[Any] = predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(_lowerCAmelCase ) ) , 3 ):
            logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(_lowerCAmelCase ):
        UpperCamelCase : Tuple = p.predictions[0] if isinstance(p.predictions , _lowerCAmelCase ) else p.predictions
        UpperCamelCase : Union[str, Any] = np.argmax(_lowerCAmelCase , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        UpperCamelCase : Tuple = default_data_collator
    elif training_args.fpaa:
        # Pad to multiples of 8 so fp16 tensor cores can be used.
        UpperCamelCase : Optional[int] = DataCollatorWithPadding(_lowerCAmelCase , pad_to_multiple_of=8 )
    else:
        UpperCamelCase : Tuple = None
    # Initialize our Trainer
    UpperCamelCase : str = Trainer(
        model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , data_collator=_lowerCAmelCase , )
    # Training
    if training_args.do_train:
        UpperCamelCase : Union[str, Any] = None
        if training_args.resume_from_checkpoint is not None:
            UpperCamelCase : List[str] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCamelCase : Optional[int] = last_checkpoint
        UpperCamelCase : List[Any] = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
        UpperCamelCase : Union[str, Any] = train_result.metrics
        UpperCamelCase : Any = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase )
        )
        UpperCamelCase : int = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train" , _lowerCAmelCase )
        trainer.save_metrics("train" , _lowerCAmelCase )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        UpperCamelCase : Union[str, Any] = trainer.evaluate(eval_dataset=_lowerCAmelCase )
        UpperCamelCase : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase )
        UpperCamelCase : Any = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
        trainer.log_metrics("eval" , _lowerCAmelCase )
        trainer.save_metrics("eval" , _lowerCAmelCase )
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        UpperCamelCase : Optional[int] = predict_dataset.remove_columns("label" )
        UpperCamelCase : Tuple = trainer.predict(_lowerCAmelCase , metric_key_prefix="predict" ).predictions
        UpperCamelCase : Union[str, Any] = np.argmax(_lowerCAmelCase , axis=1 )
        UpperCamelCase : Tuple = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
        if trainer.is_world_process_zero():
            with open(_lowerCAmelCase , "w" ) as writer:
                logger.info("***** Predict Results *****" )
                writer.write("index\tprediction\n" )
                for index, item in enumerate(_lowerCAmelCase ):
                    UpperCamelCase : Tuple = label_list[item]
                    writer.write(F"""{index}\t{item}\n""" )
    UpperCamelCase : Dict = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**_lowerCAmelCase )
    else:
        trainer.create_model_card(**_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> Tuple:
    '''Entry point used by `xla_spawn` on TPUs; the argument is the process
    index and is intentionally unused.

    NOTE(review): calls module-level `main()`, but in this renamed file the
    main function is `A_` — verify the name resolves.
    '''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 719
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
    '''Dummy placeholder raising a helpful error whenever it is used while the
    `note_seq` backend is not installed.'''
    # Backends that must be importable for the real object to be usable.
    _UpperCAmelCase :Tuple = ['note_seq']
    def __init__( self , *A_ , **A_ ):
        '''Fail construction unless the `note_seq` backend is available.'''
        requires_backends(self , ["note_seq"] )
    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        '''Class-level constructor guard — requires the `note_seq` backend.'''
        requires_backends(cls , ["note_seq"] )
    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        '''Class-level constructor guard — requires the `note_seq` backend.'''
        requires_backends(cls , ["note_seq"] )
| 38
| 0
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class A__ :
    '''Helper that builds small DPT configs/inputs and runs shape checks for
    DPTModel / DPTForDepthEstimation / DPTForSemanticSegmentation.

    NOTE(review): several assignment targets below appear garbled (bound to
    `UpperCamelCase` while later lines read `num_patches`, `pixel_values`,
    ...). Verify against the original test file.
    '''
    def __init__( self , A_ , A_=2 , A_=32 , A_=16 , A_=3 , A_=True , A_=True , A_=32 , A_=4 , A_=[0, 1, 2, 3] , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.02 , A_=3 , A_=[1, 384, 24, 24] , A_=True , A_=None , ):
        '''Store the (tiny) model hyper-parameters used by every check.'''
        UpperCamelCase : Dict = parent
        UpperCamelCase : Optional[int] = batch_size
        UpperCamelCase : Tuple = image_size
        UpperCamelCase : Optional[int] = patch_size
        UpperCamelCase : str = num_channels
        UpperCamelCase : Tuple = is_training
        UpperCamelCase : Dict = use_labels
        UpperCamelCase : Union[str, Any] = hidden_size
        UpperCamelCase : Optional[int] = num_hidden_layers
        UpperCamelCase : str = backbone_out_indices
        UpperCamelCase : Tuple = num_attention_heads
        UpperCamelCase : int = intermediate_size
        UpperCamelCase : str = hidden_act
        UpperCamelCase : List[Any] = hidden_dropout_prob
        UpperCamelCase : Any = attention_probs_dropout_prob
        UpperCamelCase : Dict = initializer_range
        UpperCamelCase : Dict = num_labels
        UpperCamelCase : Union[str, Any] = backbone_featmap_shape
        UpperCamelCase : Dict = scope
        UpperCamelCase : Any = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        UpperCamelCase : Any = (image_size // patch_size) ** 2
        UpperCamelCase : Optional[int] = num_patches + 1
    def __UpperCamelCase( self ):
        '''Create random pixel values (and labels when enabled) plus a config.'''
        UpperCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase : Optional[int] = None
        if self.use_labels:
            UpperCamelCase : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        UpperCamelCase : Tuple = self.get_config()
        return config, pixel_values, labels
    def __UpperCamelCase( self ):
        '''Build a hybrid DPTConfig with a tiny BiT-style backbone config.'''
        UpperCamelCase : List[str] = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=A_ , backbone_featmap_shape=self.backbone_featmap_shape , )
    def __UpperCamelCase( self , A_ , A_ , A_ ):
        '''Check DPTModel produces a last_hidden_state of the expected shape.'''
        UpperCamelCase : str = DPTModel(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : List[Any] = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ ):
        '''Check DPTForDepthEstimation returns a (B, H, W) depth map.'''
        UpperCamelCase : int = self.num_labels
        UpperCamelCase : Dict = DPTForDepthEstimation(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Dict = model(A_ )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ ):
        '''Check DPTForSemanticSegmentation returns (B, num_labels, H, W) logits.'''
        UpperCamelCase : Tuple = self.num_labels
        UpperCamelCase : int = DPTForSemanticSegmentation(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Dict = model(A_ , labels=A_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def __UpperCamelCase( self ):
        '''Return (config, inputs_dict) in the shape ModelTesterMixin expects.'''
        UpperCamelCase : Tuple = self.prepare_config_and_inputs()
        UpperCamelCase : Optional[Any] = config_and_inputs
        UpperCamelCase : List[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    '''Common-model-test suite for the DPT family (model, depth estimation,
    semantic segmentation), run via ModelTesterMixin/PipelineTesterMixin.'''

    _UpperCAmelCase :Tuple = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    _UpperCAmelCase :List[str] = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase :Optional[Any] = False
    _UpperCAmelCase :str = False
    _UpperCAmelCase :Dict = False
    def __UpperCamelCase( self ):
        '''Create the model tester and a config tester for DPT.'''
        UpperCamelCase : Optional[int] = DPTModelTester(self )
        UpperCamelCase : str = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
    def __UpperCamelCase( self ):
        '''Run the standard configuration sanity checks.'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DPT does not use inputs_embeds" )
    def __UpperCamelCase( self ):
        '''Skipped: DPT is a vision model without inputs_embeds.'''
        pass
    def __UpperCamelCase( self ):
        '''Input embeddings should be an nn.Module; output embeddings absent or Linear.'''
        UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase : Tuple = model_class(A_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            UpperCamelCase : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
    def __UpperCamelCase( self ):
        '''The forward signature's first argument must be `pixel_values`.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase : Optional[int] = model_class(A_ )
            UpperCamelCase : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
            UpperCamelCase : Optional[int] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , A_ )
    def __UpperCamelCase( self ):
        '''Shape check for the base model.'''
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )
    def __UpperCamelCase( self ):
        '''Shape check for the depth-estimation head.'''
        UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*A_ )
    def __UpperCamelCase( self ):
        '''Shape check for the semantic-segmentation head.'''
        UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
    def __UpperCamelCase( self ):
        '''Training smoke test: a full forward/backward pass must not crash.'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
            UpperCamelCase : List[Any] = True
            if model_class in get_values(A_ ):
                continue
            UpperCamelCase : Optional[int] = model_class(A_ )
            model.to(A_ )
            model.train()
            UpperCamelCase : Optional[int] = self._prepare_for_class(A_ , A_ , return_labels=A_ )
            UpperCamelCase : Optional[int] = model(**A_ ).loss
            loss.backward()
    def __UpperCamelCase( self ):
        '''Training smoke test with gradient checkpointing enabled.'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
            UpperCamelCase : Any = False
            UpperCamelCase : Optional[int] = True
            if model_class in get_values(A_ ) or not model_class.supports_gradient_checkpointing:
                continue
            UpperCamelCase : str = model_class(A_ )
            model.to(A_ )
            model.gradient_checkpointing_enable()
            model.train()
            UpperCamelCase : Any = self._prepare_for_class(A_ , A_ , return_labels=A_ )
            UpperCamelCase : Any = model(**A_ ).loss
            loss.backward()
    def __UpperCamelCase( self ):
        '''With a zero-init config, every trainable parameter mean must be 0 or 1
        (the pretrained hybrid backbone is exempt).'''
        UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : List[Any] = _config_zero_init(A_ )
        for model_class in self.all_model_classes:
            UpperCamelCase : List[str] = model_class(config=A_ )
            # Skip the check for the backbone
            UpperCamelCase : List[Any] = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    UpperCamelCase : Optional[Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __UpperCamelCase( self ):
        '''Skipped pending a smaller common-test model.'''
        pass
    @slow
    def __UpperCamelCase( self ):
        '''Slow: load each published DPT checkpoint and assert it instantiates.'''
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            UpperCamelCase : Dict = DPTModel.from_pretrained(A_ )
            self.assertIsNotNone(A_ )
    def __UpperCamelCase( self ):
        '''An invalid readout type ("add" here, per the obfuscated assignment)
        must raise when building DPTForDepthEstimation.'''
        UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Dict = "add"
        with self.assertRaises(A_ ):
            UpperCamelCase : Optional[int] = DPTForDepthEstimation(A_ )
def A_ ( ) -> Optional[Any]:
    """Load the COCO sample image fixture used by the slow integration tests.

    Returns:
        The opened PIL image.
    """
    # Fix: the original assigned the image to a throwaway name and then
    # returned the undefined name `image` (NameError); return it directly.
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
@require_vision
@slow
class A__ ( unittest.TestCase ):
    '''Slow integration test: run Intel/dpt-hybrid-midas on the COCO fixture
    and compare the predicted depth against stored reference values.'''
    def __UpperCamelCase( self ):
        '''Forward pass on a real image; check output shape and a 3x3 slice.'''
        UpperCamelCase : int = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
        UpperCamelCase : Optional[Any] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(A_ )
        UpperCamelCase : int = prepare_img()
        UpperCamelCase : Tuple = image_processor(images=A_ , return_tensors="pt" ).to(A_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase : List[str] = model(**A_ )
        UpperCamelCase : int = outputs.predicted_depth
        # verify the predicted depth
        UpperCamelCase : str = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , A_ )
        UpperCamelCase : str = torch.tensor(
            [[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(A_ )
        # values are divided by 100 to match the stored reference scale
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , A_ , atol=1e-4 ) )
| 720
|
import math
import tensorflow as tf
from packaging import version
def A_ ( _lowerCAmelCase ) -> Any:
    """Exact Gaussian Error Linear Unit: ``x * Phi(x)`` with ``Phi`` the
    standard normal CDF, computed via ``erf`` (https://arxiv.org/abs/1606.08415).

    Args:
        _lowerCAmelCase: input tensor (or array-like convertible to one).

    Returns:
        Tensor of the same shape with GELU applied elementwise.
    """
    # Fix: the original assigned to throwaway names but the expressions read
    # `x` and `cdf`, which were never bound (NameError); restore the locals.
    x = tf.convert_to_tensor(_lowerCAmelCase )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> Dict:
    """Tanh approximation of GELU (the "gelu_new" variant used by GPT-2):
    ``0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))``.

    Args:
        _lowerCAmelCase: input tensor (or array-like convertible to one).

    Returns:
        Tensor of the same shape with the approximate GELU applied.
    """
    # Fix: the original assigned to throwaway names but the expressions read
    # `x`, `pi`, `coeff` and `cdf`, which were never bound; restore the locals.
    x = tf.convert_to_tensor(_lowerCAmelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044_715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Mish activation: ``x * tanh(softplus(x))`` (https://arxiv.org/abs/1908.08681).

    Args:
        _lowerCAmelCase: input tensor (or array-like convertible to one).

    Returns:
        Tensor of the same shape with Mish applied elementwise.
    """
    # Fix: the original read an undefined `x` and applied softplus to the raw
    # argument instead of the converted tensor; bind the local and use it twice.
    x = tf.convert_to_tensor(_lowerCAmelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Fast tanh-based GELU approximation:
    ``0.5 * x * (1 + tanh(x * 0.7978845608 * (1 + 0.044715 * x^2)))``
    where 0.7978845608 ~= sqrt(2/pi).

    Args:
        _lowerCAmelCase: input tensor (or array-like convertible to one).

    Returns:
        Tensor of the same shape with the fast GELU applied.
    """
    # Fix: the two distinct coefficients had been collapsed into a single
    # undefined name (`coeffa`); restore both and the local `x`.
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff_inner = tf.cast(0.044_715 , x.dtype )
    coeff_outer = tf.cast(0.7_978_845_608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_outer * (1.0 + coeff_inner * x * x) ))
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Quick GELU approximation: ``x * sigmoid(1.702 * x)``.

    Args:
        _lowerCAmelCase: input tensor (or array-like convertible to one).

    Returns:
        Tensor of the same shape with quick-GELU applied elementwise.
    """
    # Fix: the original read undefined locals `x` and `coeff`; bind them.
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Clip the exact GELU output to the range [-10, 10] (the "gelu_10" variant).

    NOTE(review): `_gelu` is not defined under that name in this renamed file's
    visible scope — verify it resolves at module level.
    """
    return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 )
def A_ ( _lowerCAmelCase , axis=-1 ):
    """Gated Linear Unit (https://arxiv.org/abs/1612.08083): split the input in
    two halves along *axis* and gate the first half with the sigmoid of the
    second.

    Args:
        _lowerCAmelCase: input tensor whose size along *axis* is even.
        axis: axis along which to split; defaults to the last axis.

    Returns:
        A tensor of half the input size along *axis*.
    """
    # Fix: the original declared the parameter `_lowerCAmelCase` twice (a
    # SyntaxError), split along the *input tensor* instead of the axis, and
    # read the undefined name `a`; restore the intended split/gate logic.
    a , b = tf.split(_lowerCAmelCase , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
# On TF >= 2.4 Keras ships a native (exact) gelu; also expose a wrapper that
# requests the tanh approximation so the approximate variant stays available.
# NOTE(review): the right-hand names `approximate_gelu_wrap`, `_gelu` and
# `_gelu_new` are not defined under those names in this renamed file — verify
# they resolve at module level.
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):

    def A_ ( _lowerCAmelCase ) -> Any:
        """Keras gelu forced to the tanh approximation (matches `_gelu_new`)."""
        # Fix: `approximate` was being passed the input tensor itself; it must
        # be the boolean True to select the approximate formulation.
        return tf.keras.activations.gelu(_lowerCAmelCase , approximate=True )

    __lowerCamelCase : Optional[int] = tf.keras.activations.gelu
    __lowerCamelCase : int = approximate_gelu_wrap
else:
    __lowerCamelCase : List[Any] = _gelu
    __lowerCamelCase : Optional[Any] = _gelu_new
# String name -> TF activation callable, consumed by the lookup helper below.
# NOTE(review): the value names (`gelu`, `gelu_aa`, ...) are not defined under
# those names in this renamed file — verify they resolve at module level.
__lowerCamelCase : Any = {
    """gelu""": gelu,
    """gelu_10""": gelu_aa,
    """gelu_fast""": gelu_fast,
    """gelu_new""": gelu_new,
    """glu""": glu,
    """mish""": mish,
    """quick_gelu""": quick_gelu,
    """relu""": tf.keras.activations.relu,
    """sigmoid""": tf.keras.activations.sigmoid,
    """silu""": tf.keras.activations.swish,
    """swish""": tf.keras.activations.swish,
    """tanh""": tf.keras.activations.tanh,
}
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Look up a TF activation function by its string name.

    Args:
        _lowerCAmelCase: name of the activation — a key of ``ACTaFN``.

    Returns:
        The corresponding activation callable.

    Raises:
        KeyError: if the name is not a key of ``ACTaFN``.
    """
    # Fix: the original body read the undefined name `activation_string`
    # instead of the function's parameter; bind it explicitly.
    activation_string = _lowerCAmelCase
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 38
| 0
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class A__ :
    '''Helper that builds small BEiT configs/inputs and runs shape checks for
    BeitModel / BeitForMaskedImageModeling / BeitForImageClassification /
    BeitForSemanticSegmentation.

    NOTE(review): several assignment targets below appear garbled (bound to
    `UpperCamelCase` while later lines read `num_patches`, `pixel_values`,
    ...). Verify against the original test file.
    '''
    def __init__( self , A_ , A_=100 , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=4 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=[0, 1, 2, 3] , ):
        '''Store the (tiny) model hyper-parameters used by every check.'''
        UpperCamelCase : int = parent
        UpperCamelCase : Optional[Any] = 100
        UpperCamelCase : List[str] = batch_size
        UpperCamelCase : int = image_size
        UpperCamelCase : str = patch_size
        UpperCamelCase : Optional[Any] = num_channels
        UpperCamelCase : str = is_training
        UpperCamelCase : Tuple = use_labels
        UpperCamelCase : int = hidden_size
        UpperCamelCase : Any = num_hidden_layers
        UpperCamelCase : Union[str, Any] = num_attention_heads
        UpperCamelCase : Dict = intermediate_size
        UpperCamelCase : List[Any] = hidden_act
        UpperCamelCase : Union[str, Any] = hidden_dropout_prob
        UpperCamelCase : List[str] = attention_probs_dropout_prob
        UpperCamelCase : Dict = type_sequence_label_size
        UpperCamelCase : Union[str, Any] = initializer_range
        UpperCamelCase : int = scope
        UpperCamelCase : Optional[Any] = out_indices
        UpperCamelCase : Tuple = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCamelCase : List[str] = (image_size // patch_size) ** 2
        UpperCamelCase : List[Any] = num_patches + 1
    def __UpperCamelCase( self ):
        '''Create random pixel values, optional class/pixel labels, and a config.'''
        UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase : int = None
        UpperCamelCase : Optional[int] = None
        if self.use_labels:
            UpperCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        UpperCamelCase : int = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def __UpperCamelCase( self ):
        '''Build a BeitConfig from the stored hyper-parameters.'''
        return BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        '''Check BeitModel produces a last_hidden_state of the expected shape.'''
        UpperCamelCase : Dict = BeitModel(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        '''Check masked-image-modeling logits over the patch tokens (no [CLS]).'''
        UpperCamelCase : Any = BeitForMaskedImageModeling(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Dict = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        '''Check classification logits, including the single-channel (greyscale) path.'''
        UpperCamelCase : Union[str, Any] = self.type_sequence_label_size
        UpperCamelCase : List[Any] = BeitForImageClassification(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Tuple = model(A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Any = BeitForImageClassification(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase : Tuple = model(A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        '''Check segmentation logits at 2x the input resolution, with and without labels.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : List[str] = BeitForSemanticSegmentation(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : int = model(A_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        UpperCamelCase : Tuple = model(A_ , labels=A_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def __UpperCamelCase( self ):
        '''Return (config, inputs_dict) in the shape ModelTesterMixin expects.'''
        UpperCamelCase : Any = self.prepare_config_and_inputs()
        UpperCamelCase : Dict = config_and_inputs
        UpperCamelCase : Optional[int] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    '''Common-model-test suite for the BEiT family, run via
    ModelTesterMixin/PipelineTesterMixin.'''

    _UpperCAmelCase :List[str] = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase :List[str] = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase :Optional[Any] = False
    _UpperCAmelCase :str = False
    _UpperCAmelCase :Optional[int] = False
    def __UpperCamelCase( self ):
        '''Create the model tester and a config tester for BEiT.'''
        UpperCamelCase : Optional[int] = BeitModelTester(self )
        UpperCamelCase : List[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
    def __UpperCamelCase( self ):
        '''Run the standard configuration sanity checks.'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds" )
    def __UpperCamelCase( self ):
        '''Skipped: BEiT is a vision model without inputs_embeds.'''
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def __UpperCamelCase( self ):
        '''Skipped: incompatible with nn.DataParallel (see reason above).'''
        pass
    def __UpperCamelCase( self ):
        '''Input embeddings should be an nn.Module; output embeddings absent or Linear.'''
        UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase : Tuple = model_class(A_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            UpperCamelCase : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[Any] = model_class(A_ )
UpperCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A_ ), BeitForMaskedImageModeling]:
continue
UpperCamelCase : Dict = model_class(A_ )
model.to(A_ )
model.train()
UpperCamelCase : List[str] = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase : int = model(**A_ ).loss
loss.backward()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase : List[Any] = False
UpperCamelCase : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase : Union[str, Any] = model_class(A_ )
model.gradient_checkpointing_enable()
model.to(A_ )
model.train()
UpperCamelCase : Optional[int] = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase : int = model(**A_ ).loss
loss.backward()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : List[str] = _config_zero_init(A_ )
for model_class in self.all_model_classes:
UpperCamelCase : Tuple = model_class(config=A_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Dict = BeitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A_ ( ) -> Optional[int]:
    """Load and return the standard COCO cats test-fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
    """Slow integration tests that run real BEiT checkpoints end to end."""

    @cached_property
    def default_image_processor( self ):
        """Shared BEiT image processor, or None when vision deps are absent."""
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head( self ):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(torch_device )
        image_processor = self.default_image_processor
        # module-level fixture loader (mangled name for `prepare_img`)
        image = A_()
        pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
        # prepare bool_masked_pos: mask every one of the 196 patches
        bool_masked_pos = torch.ones((1, 196) , dtype=torch.bool ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1e-2 ) )

    @slow
    def test_inference_image_classification_head_imagenet_1k( self ):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(torch_device )
        image_processor = self.default_image_processor
        image = A_()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )

    @slow
    def test_inference_image_classification_head_imagenet_22k( self ):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = A_()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 2_1841) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )

    @slow
    def test_inference_semantic_segmentation( self ):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        model = model.to(torch_device )
        # presumably do_resize=True / do_center_crop=False — confirm against upstream
        image_processor = BeitImageProcessor(do_resize=True , size=640 , do_center_crop=False )
        ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        image = Image.open(ds[0]["file"] )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160) )
        self.assertEqual(logits.shape , expected_shape )
        # Pillow >= 9 changed default resampling, which shifts the values slightly
        is_pillow_less_than_a = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
        if is_pillow_less_than_a:
            expected_slice = torch.tensor(
                [
                    [[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
                    [[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
                    [[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
                ] , device=torch_device , )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
                    [[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
                    [[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
                ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_post_processing_semantic_segmentation( self ):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        model = model.to(torch_device )
        image_processor = BeitImageProcessor(do_resize=True , size=640 , do_center_crop=False )
        ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        image = Image.open(ds[0]["file"] )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        # with an explicit target size the map is rescaled to it
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(500, 300)] )
        expected_shape = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        # without target sizes the raw logit resolution is kept
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((160, 160) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
    """Fast (tiny-model) tests for the Kandinsky 2.2 text2img decoder pipeline.

    NOTE(review): mixin attributes, properties and hook methods had been
    collapsed to duplicate names; restored to the names the mixin and the
    class body actually reference.
    """

    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size( self ):
        return 32

    @property
    def time_input_dim( self ):
        return 32

    @property
    def block_out_channels_a( self ):
        return self.time_input_dim

    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ):
        return 100

    @property
    def dummy_unet( self ):
        """Tiny deterministic UNet for fast pipeline tests."""
        torch.manual_seed(0 )
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs )
        return model

    @property
    def dummy_movq_kwargs( self ):
        """Constructor kwargs for the tiny MoVQ (VQModel) decoder."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model

    def get_dummy_components( self ):
        """Assemble the pipeline components the mixin instantiates."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        # presumably clip_sample/set_alpha_to_one/thresholding are all False — confirm
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        """Deterministic pipeline call kwargs (seeded embeds + generator)."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        if str(device ).startswith("mps" ):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky( self ):
        """Run the tiny pipeline on CPU and pin a slice of the output image."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """End-to-end GPU test: Kandinsky 2.2 prior + decoder against a reference image."""

    def tearDown( self ):
        # release VRAM between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        # NOTE(review): original said `torch.floataa`, which does not exist;
        # the fp16 reference file implies float16.
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        image_embeds, negative_image_embeds = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_embeds , negative_image_embeds=negative_image_embeds , generator=generator , num_inference_steps=100 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 38
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Any = {
"""google/rembert""": 256,
}
__lowerCamelCase : Union[str, Any] = """▁"""
class A__ ( __snake_case ):
    """Fast RemBERT tokenizer backed by a sentencepiece-derived tokenizer.json.

    NOTE(review): class attributes and override methods had been mangled to
    duplicate names; restored to the names the PreTrainedTokenizerFast base
    class dispatches on.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behaves like a normal word: include the preceding space
        # (lstrip=True) — TODO confirm flags against the slow tokenizer.
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # saving the slow vocab is only possible when a sentencepiece model was given
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_a_a = None ):
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def get_special_tokens_mask( self , token_ids_a , token_ids_a_a = None , already_has_special_tokens = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_a_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a_a is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_a_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_a_a = None ):
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a_a + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """Copy the sentencepiece model into `save_directory`; returns its path."""
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        # avoid copying a file onto itself
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 700
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A_ ( ) -> Dict:
UpperCamelCase : Tuple = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=_lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=_lowerCAmelCase , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=_lowerCAmelCase )
return parser.parse_args()
def A_ ( ) -> Optional[int]:
    """Import the user training script as a module and spawn it on TPU cores."""
    # NOTE(review): the parser helper above is also (mis)named `A_`, so
    # `parse_args` is unresolved in this module as written — confirm names.
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the script sees its own args plus the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    # The entry point defined above is (mis)named `A_`; `main` does not exist.
    A_()
| 38
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazily expose the TrOCR submodules so importing the package stays cheap.
# NOTE(review): `_import_structure` was referenced but never defined, and the
# lazy module was assigned to a throwaway name instead of sys.modules.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazily expose the vision-encoder-decoder submodules per backend availability.
# NOTE(review): `_import_structure` was referenced but never defined, and the
# lazy module was assigned to a throwaway name instead of sys.modules.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38
| 0
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A_ (
    model_type ,
    generator_name_or_path ,
    question_encoder_name_or_path ,
    dest_dir ,
    config_name_or_path = None ,
    generator_tokenizer_name_or_path = None ,
    question_encoder_tokenizer_name_or_path = None ,
) -> str:
    """Consolidate separate generator/question-encoder checkpoints into a RAG model.

    Saves the combined model plus both tokenizers under ``dest_dir`` (a Path).
    Tokenizer paths default to their respective model paths; the base config
    defaults per ``model_type``.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check: the saved checkpoint must load back.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""",
        choices=["""rag_sequence""", """rag_token"""],
        required=True,
        type=str,
        help="""RAG model type: rag_sequence, rag_token""",
    )
    parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
    parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
    parser.add_argument(
        """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
    )
    parser.add_argument(
        """--generator_tokenizer_name_or_path""",
        type=str,
        help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
    )
    parser.add_argument(
        """--question_encoder_tokenizer_name_or_path""",
        type=str,
        help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
    )
    parser.add_argument(
        """--config_name_or_path""",
        type=str,
        help=(
            """Identifier of the model config to use, if not provided, resolves to a base config for a given"""
            """ ``model_type``"""
        ),
    )
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    # `A_` is the consolidation helper defined above (`consolidate` is undefined
    # in this file as written).
    A_(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 702
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class A__ ( unittest.TestCase ):
    """Fixture holder for the Vivit image-processor tests.

    NOTE(review): constructor args were all named identically and never
    assigned to ``self``; restored to the attribute names the downstream
    test class reads (batch_size, num_frames, num_channels, crop_size, ...).
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=None , image_std=None , crop_size=None , ):
        # avoid mutable list defaults; fall back to the documented values
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict( self ):
        """Kwargs dict for constructing the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
    """Tests for VivitImageProcessor (PIL / numpy / torch inputs)."""

    # the processor class exercised here; the saving-test mixin reads this name
    image_processing_class = VivitImageProcessor if is_vision_available() else None
def setUp( self ):
    """Create the shared tester fixture (read as `self.image_processor_tester`)."""
    self.image_processor_tester = VivitImageProcessingTester(self )
@property
def image_processor_dict( self ):
    """Kwargs dict used throughout this suite to build the processor."""
    return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ):
    """The constructed processor must expose all configuration attributes."""
    image_processing = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(image_processing , "image_mean" ) )
    self.assertTrue(hasattr(image_processing , "image_std" ) )
    self.assertTrue(hasattr(image_processing , "do_normalize" ) )
    self.assertTrue(hasattr(image_processing , "do_resize" ) )
    self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
    self.assertTrue(hasattr(image_processing , "size" ) )
def test_image_processor_from_dict_with_kwargs( self ):
    """`from_dict` honors its dict and kwargs overrides for size/crop_size."""
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size , {"shortest_edge": 18} )
    self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
    self.assertEqual(image_processor.size , {"shortest_edge": 42} )
    self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def test_call_pil( self ):
    """Processing lists of PIL frames yields (B, T, C, H, W) pixel values."""
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PIL videos
    video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
    for video in video_inputs:
        self.assertIsInstance(video , list )
        self.assertIsInstance(video[0] , Image.Image )
    # Test not batched input
    encoded_videos = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_videos.shape , (
            1,
            self.image_processor_tester.num_frames,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ) , )
    # Test batched
    encoded_videos = image_processing(video_inputs , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_videos.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_frames,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ) , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 38
| 0
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class A__ ( __snake_case ):
    """Iterable dataset that packs tokenized texts into constant-length chunks.

    Raw examples are read from `dataset`, their "content" field is tokenized,
    the token streams are joined with the tokenizer's BOS token, and the
    result is re-split into `seq_length`-token `torch.tensor`s.

    Fix: the original declared every __init__ parameter as `A_` (duplicate
    argument names are a SyntaxError) and bound the attributes to a throwaway
    local instead of `self`; names restored from the reads in `__iter__`.
    """

    def __init__( self , tokenizer , dataset , seq_length=1024 , num_of_sequences=1024 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        # BOS is used as the separator between concatenated documents.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Raw characters buffered before each tokenization pass.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )["content"] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                # Drop the trailing partial chunk.
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
def A_ ( _lowerCAmelCase ) -> DataLoader:
    """Build the streaming evaluation dataloader.

    `_lowerCAmelCase` is the parsed args namespace (reads `.dataset_name`,
    `.seq_length`, `.batch_size`); the module-level `tokenizer` is used.

    Fix: the original loaded the dataset with `**args` instead of the
    streaming kwargs and passed the args namespace twice to
    ConstantLengthDataset (upstream passes tokenizer + dataset).
    """
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(_lowerCAmelCase.dataset_name , split="train" , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=_lowerCAmelCase.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=_lowerCAmelCase.batch_size )
    return eval_dataloader
def A_ ( _lowerCAmelCase ):
    """Evaluate the module-level `model` over `eval_dataloader`.

    `_lowerCAmelCase` is the parsed args namespace (reads `.batch_size`,
    `.max_eval_steps`). Returns `(mean_loss, perplexity)` as Python floats.

    Fix: the original enumerated the args namespace instead of
    `eval_dataloader` and fed args (not the batch) to the model.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(_lowerCAmelCase.batch_size )
        losses.append(accelerator.gather(loss ) )
        if _lowerCAmelCase.max_eval_steps > 0 and step >= _lowerCAmelCase.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        # exp(loss) overflowed: report infinite perplexity instead of crashing.
        perplexity = float("inf" )
    return loss.item(), perplexity.item()
# Setup Accelerator
# NOTE(review): every assignment below had been mangled to `__lowerCamelCase`
# while later lines read the real names; restored. `create_dataloader` and
# `evaluate` are the two functions defined above (currently both named `A_`)
# — restore their names for this script to run. TODO confirm.
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 703
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): these module constants had all been mangled to rebindings of
# `__lowerCamelCase`; names restored from the reads in the tokenizer class
# below (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class A__ ( __snake_case ):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall.

    Fix: the four class attributes were all bound to the same mangled name
    `_UpperCAmelCase` (each overwriting the previous) and every method
    parameter was named `A_` (duplicate argument names are a SyntaxError);
    names restored to the standard fast-tokenizer attribute names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space

    def __UpperCamelCase( self , token_ids_a , token_ids_b=None ):
        """Wrap one (or two) sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]

    def __UpperCamelCase( self , token_ids_a , token_ids_b = None ):
        """Return the all-zeros token-type-id mask (this model ignores token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 38
| 0
|
from __future__ import annotations
def A_ ( pattern , text ) -> bool:
    """Return True iff `pattern` occurs in `text` (Knuth–Morris–Pratt).

    Fix: both parameters were declared `_lowerCAmelCase` (duplicate argument
    names are a SyntaxError), the `i, j` unpack was mangled, and the failure
    table came from an undefined `get_failure_array`; the table is now built
    by a local helper so the function is self-contained.
    """

    def _failure_array(p ):
        # table[k] = length of the longest proper prefix of p[:k+1]
        # that is also a suffix of it.
        table = [0]
        k, q = 0, 1
        while q < len(p ):
            if p[k] == p[q]:
                k += 1
            elif k > 0:
                k = table[k - 1]
                continue
            q += 1
            table.append(k )
        return table

    failure = _failure_array(pattern )
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def A_ ( _lowerCAmelCase ) -> list[int]:
    """Return the KMP failure table for the pattern `_lowerCAmelCase`.

    failure[k] is the length of the longest proper prefix of pattern[:k+1]
    that is also a suffix of it.

    Fix: the original bound intermediates to a throwaway name
    (`UpperCamelCase`) instead of `failure`/`i`/`j`, so the fallback
    `i = failure[i - 1]` never took effect.
    """
    pattern = _lowerCAmelCase
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # fall back to the longest border seen so far and retry
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
__lowerCamelCase : Optional[Any] = """abc1abc12"""
__lowerCamelCase : int = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
__lowerCamelCase : Tuple = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__lowerCamelCase : List[Any] = """ABABX"""
__lowerCamelCase : Dict = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
__lowerCamelCase : Optional[int] = """AAAB"""
__lowerCamelCase : Any = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
__lowerCamelCase : Optional[Any] = """abcdabcy"""
__lowerCamelCase : Optional[Any] = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
__lowerCamelCase : str = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the ConvBERT model family.
# Fix: every `_import_structure[...]` entry had been mangled into a rebinding
# of `__lowerCamelCase`, leaving `_import_structure` (read by `_LazyModule`
# below) undefined; entries and the `sys.modules` swap restored.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
    """Audio feature extractor producing padded log-mel spectrograms plus an
    attention mask over audio patches.

    Fix: the `__init__`/`__call__` parameters were all declared `A_`
    (duplicate argument names are a SyntaxError), attributes/locals were bound
    to throwaway names, and `np.floataa` is undefined (restored to
    float32/float64 per the surrounding usage — TODO confirm).
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__( self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=4_4100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # number of frequency patches per frame
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , ).T

    def __UpperCamelCase( self , waveform ):
        """Compute a normalized log-mel spectrogram (dB, clipped to [-1, 1])."""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ):
        """Featurize mono audio into `audio_values` (+ optional `audio_mask`)."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
| 705
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__lowerCamelCase : str = None
try:
import msvcrt
except ImportError:
__lowerCamelCase : str = None
try:
import fcntl
except ImportError:
__lowerCamelCase : List[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    # Python 2 has no TimeoutError; alias it to OSError.
    TimeoutError = OSError
# Data
# ------------------------------------------------
# NOTE(review): these globals had been mangled to `__lowerCamelCase`
# rebindings; `_logger` is read by the logger accessor below, and
# `__all__`/`__version__` follow upstream py-filelock 3.0.12.
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__version__ = "3.0.12"
_logger = None
def A_ ( ) -> logging.Logger:
    """Return the module logger, creating and caching it on first use.

    Fix: the original bound the result to a throwaway local, so the
    module-level `_logger` cache was never populated.
    """
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class A__ ( __snake_case ):
    """Raised when a file lock cannot be acquired within the timeout."""

    def __init__( self , A_ ):
        # A_: path of the lock file that could not be acquired.
        # Fix: the original bound it to a throwaway local, so `__str__`'s
        # read of `self.lock_file` raised AttributeError.
        self.lock_file = A_
        return None

    def __str__( self ):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class A__ :
    """Context-manager proxy returned by `acquire()`: yields the lock on
    entry and releases it on exit.

    Fix: `__init__` bound the lock to a throwaway local (so `self.lock` was
    never set) and `__exit__` declared three parameters all named `A_`
    (duplicate argument names are a SyntaxError).
    """

    def __init__( self , A_ ):
        self.lock = A_
        return None

    def __enter__( self ):
        return self.lock

    def __exit__( self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class A__ :
    """Base class for an inter-process lock file with nested-acquire counting.

    Subclasses provide the platform-specific `_acquire`/`_release`.

    Fix: every attribute/local was bound to a throwaway name while the
    visible bodies read `self._lock_file`, `self._lock_file_fd`,
    `self._thread_lock`, `self._lock_counter`, `self.is_locked`,
    `self._acquire()`, `self._release()` and `self.release()`; the method
    names and assignments are restored from those reads (upstream
    py-filelock 3.0.12). Several signatures also had duplicate `A_`
    parameters (a SyntaxError).
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file( self ):
        """Path of the lock file."""
        return self._lock_file

    @property
    def timeout( self ):
        """Default acquire timeout in seconds (-1 waits forever)."""
        return self._timeout

    @timeout.setter
    def timeout( self , value ):
        self._timeout = float(value )
        return None

    def _acquire( self ):
        """Platform-specific acquisition; implemented by subclasses."""
        raise NotImplementedError()

    def _release( self ):
        """Platform-specific release; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def is_locked( self ):
        """True while this object holds the OS-level lock."""
        return self._lock_file_fd is not None

    def acquire( self , timeout=None , poll_intervall=0.05 ):
        """Acquire the lock, polling every `poll_intervall` seconds until `timeout`."""
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )

    def release( self , force=False ):
        """Decrement the nesting counter; release the OS lock at zero (or `force`)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}" )
        return None

    def __enter__( self ):
        self.acquire()
        return self

    def __exit__( self , exc_type , exc_value , traceback ):
        self.release()
        return None

    def __del__( self ):
        self.release(force=True )
        return None

    def hash_filename_if_too_long( self , path , max_length ):
        """Replace an over-long lock filename with a truncated + hashed variant."""
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , new_filename )
        else:
            return path
class A__ ( __snake_case ):
    """Windows lock implementation using `msvcrt.locking` on the lock file.

    Fix: `__init__` had duplicate `A_` parameters (a SyntaxError) and the
    fd was bound to throwaway names while the bodies read `fd` /
    `self._lock_file_fd`; restored (upstream py-filelock).
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        # \\?\ prefix lifts the MAX_PATH limit on Windows.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )

    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None

    def _release( self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class A__ ( __snake_case ):
    """Unix lock implementation using `fcntl.flock` on the lock file.

    Fix: `__init__` had duplicate `A_` parameters (a SyntaxError) and the
    fd locals were bound to throwaway names; restored (upstream py-filelock).
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        # Cap the lock filename to the filesystem's name-length limit.
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )

    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class A__ ( __snake_case ):
    """Portable soft lock: existence of the lock file IS the lock (O_EXCL).

    Fix: fd locals were bound to throwaway names while the bodies read
    `fd` / `self._lock_file_fd`; restored (upstream py-filelock).
    """

    def _acquire( self ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Pick the platform-appropriate FileLock implementation.
# NOTE(review): the lock classes above are all mangled to `A__`; restore
# their names (WindowsFileLock/UnixFileLock/SoftFileLock) for this dispatch
# to resolve.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 38
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
    @slow
    def __UpperCamelCase( self ):
        """Integration check: TF XLM-R base reproduces known hidden states.

        Fix: intermediates were bound to throwaway names while assertions
        read `output`/`expected_slice`, and the undefined dtypes
        `tf.intaa`/`tf.floataa` were restored to int32/float32 — TODO
        confirm against upstream.
        """
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
                    [-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
                    [-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 706
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A_ ( model_type , generator_name_or_path , question_encoder_name_or_path , dest_dir , config_name_or_path = None , generator_tokenizer_name_or_path = None , question_encoder_tokenizer_name_or_path = None , ) -> None:
    """Consolidate a question encoder + generator into a single RAG checkpoint
    under `dest_dir`, including both tokenizers.

    Fix: all seven parameters were declared `_lowerCAmelCase` (duplicate
    argument names are a SyntaxError) and every `from_pretrained` call was
    fed the same mangled argument; names restored from upstream.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
__lowerCamelCase : Dict = parser.parse_args()
__lowerCamelCase : Dict = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 38
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): both globals had been mangled to `__lowerCamelCase`
# rebindings; names restored per the upstream config-module convention.
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class A__ ( __snake_case ):
    """Configuration class for XLM-RoBERTa-XL models.

    Fix: the `model_type` attribute was bound to the mangled name
    `_UpperCAmelCase` and all __init__ parameters were declared `A_`
    (duplicate argument names are a SyntaxError); names restored from the
    attribute assignments below.
    """

    model_type = "xlm-roberta-xl"

    def __init__( self , vocab_size=25_0880 , hidden_size=2560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=1_0240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( __snake_case ):
    """ONNX export config: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCamelCase( self ):
        """Return the ordered input -> dynamic-axis mapping for ONNX export.

        Fix: the axis dict was bound to a throwaway name while the return
        statement read `dynamic_axis`, which raised NameError.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 707
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
    """Builds a small ConvBERT configuration plus random inputs for the TF tests.

    NOTE(review): this file is machine-obfuscated — constructor parameters are
    all named ``A_`` (duplicate parameter names are a SyntaxError) and results
    are bound to the throwaway local ``UpperCamelCase`` instead of the
    ``self.batch_size``/``self.seq_length``/... attributes that the methods
    below read. Restore real names before executing.
    """

    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
        '''Record the tester hyper-parameters (values are hard-coded; args are effectively ignored).'''
        UpperCamelCase : Dict = parent
        UpperCamelCase : str = 13  # batch_size — TODO confirm attribute names
        UpperCamelCase : int = 7  # seq_length
        UpperCamelCase : str = True  # is_training
        UpperCamelCase : Dict = True  # use_input_mask
        UpperCamelCase : str = True  # use_token_type_ids
        UpperCamelCase : Tuple = True  # use_labels
        UpperCamelCase : List[str] = 99  # vocab_size
        UpperCamelCase : Optional[Any] = 384  # hidden_size (deliberately != signature default 32)
        UpperCamelCase : Tuple = 2  # num_hidden_layers
        UpperCamelCase : Union[str, Any] = 4  # num_attention_heads
        UpperCamelCase : Dict = 37  # intermediate_size
        UpperCamelCase : Any = "gelu"  # hidden_act
        UpperCamelCase : List[Any] = 0.1  # hidden_dropout_prob
        UpperCamelCase : int = 0.1  # attention_probs_dropout_prob
        UpperCamelCase : Tuple = 512  # max_position_embeddings
        UpperCamelCase : List[Any] = 16  # type_vocab_size
        UpperCamelCase : int = 2  # type_sequence_label_size
        UpperCamelCase : Dict = 0.02  # initializer_range
        UpperCamelCase : Optional[Any] = 3  # num_labels
        UpperCamelCase : List[Any] = 4  # num_choices
        UpperCamelCase : Dict = 128  # presumably embedding_size — TODO confirm
        UpperCamelCase : Optional[Any] = 2  # presumably head_ratio — TODO confirm
        UpperCamelCase : Optional[int] = 9  # presumably conv_kernel_size — TODO confirm
        UpperCamelCase : Optional[int] = 1  # presumably num_groups — TODO confirm
        UpperCamelCase : Union[str, Any] = None  # scope

    def __UpperCamelCase( self ):
        '''Create a ConvBertConfig and random input tensors / labels for one forward pass.'''
        UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : str = None
        if self.use_input_mask:
            UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : Tuple = None
        if self.use_token_type_ids:
            UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        # Labels are only materialised when use_labels is set.
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : List[Any] = None
        if self.use_labels:
            UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase : Any = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the base model: dict inputs and list inputs must both give (batch, seq, hidden) states.'''
        UpperCamelCase : str = TFConvBertModel(config=A_ )
        UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        UpperCamelCase : Optional[int] = [input_ids, input_mask]
        UpperCamelCase : Any = model(A_ )
        UpperCamelCase : int = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the masked-LM head: logits are (batch, seq, vocab).'''
        UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ )
        UpperCamelCase : int = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Dict = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the sequence-classification head: logits are (batch, num_labels).'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the multiple-choice head: inputs are tiled to (batch, num_choices, seq).'''
        UpperCamelCase : List[str] = self.num_choices
        UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ )
        UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the token-classification head: logits are (batch, seq, num_labels).'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : str = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the QA head: start/end logits are each (batch, seq).'''
        UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ )
        UpperCamelCase : Union[str, Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Union[str, Any] = model(A_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCamelCase( self ):
        '''Split prepare_config_and_inputs() into (config, inputs_dict) for the common tests.'''
        UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
        # NOTE(review): garbled tuple unpacking — an annotated assignment cannot
        # have a tuple target; originally this unpacked the seven values above
        # into config/input_ids/token_type_ids/input_mask/labels.
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) : Optional[Any] = config_and_inputs
        UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    """Common TF test-suite for ConvBERT (model-tester + pipeline-tester mixins).

    NOTE(review): machine-obfuscated — locals are bound to ``UpperCamelCase`` but
    read back under their original names, and several call sites reference
    undefined names (``A_``, ``TFConvBertModelTester``). Restore before running.
    """

    # All TF ConvBERT architectures exercised by the common tests.
    _UpperCAmelCase :Dict = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Task-name -> model-class mapping used by the pipeline tests.
    _UpperCAmelCase :Optional[Any] = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the common test mixin (all disabled here).
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :int = False
    _UpperCAmelCase :str = False

    def __UpperCamelCase( self ):
        '''Set up the model tester and the config tester.'''
        UpperCamelCase : Dict = TFConvBertModelTester(self )
        # NOTE(review): config_class=A_ is an obfuscation artifact (undefined name).
        UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 )

    def __UpperCamelCase( self ):
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()

    def __UpperCamelCase( self ):
        '''Base-model forward pass.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )

    def __UpperCamelCase( self ):
        '''Masked-LM head.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A_ )

    def __UpperCamelCase( self ):
        '''Multiple-choice head.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A_ )

    def __UpperCamelCase( self ):
        '''Question-answering head.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )

    def __UpperCamelCase( self ):
        '''Sequence-classification head.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A_ )

    def __UpperCamelCase( self ):
        '''Token-classification head.'''
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )

    @slow
    def __UpperCamelCase( self ):
        '''SavedModel round-trip: export each model, reload it, and check that
        hidden-states and attentions keep their expected shapes.'''
        UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Optional[Any] = True
        UpperCamelCase : Any = True
        if hasattr(A_ , "use_cache" ):
            UpperCamelCase : List[str] = True
        UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ )
        for model_class in self.all_model_classes:
            UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ )
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Optional[int] = len(model(A_ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(A_ , saved_model=A_ )
                # Keras SavedModel layout: <dir>/saved_model/1
                UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" )
                UpperCamelCase : Dict = tf.keras.models.load_model(A_ )
                UpperCamelCase : str = model(A_ )
                if self.is_encoder_decoder:
                    UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"]
                    UpperCamelCase : Any = outputs["encoder_attentions"]
                else:
                    UpperCamelCase : Any = outputs["hidden_states"]
                    UpperCamelCase : List[str] = outputs["attentions"]
                self.assertEqual(len(A_ ) , A_ )
                UpperCamelCase : int = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(A_ ) , A_ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
                # NOTE(review): heads halved — presumably ConvBERT's head_ratio; confirm.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def __UpperCamelCase( self ):
        '''Smoke test: the pretrained checkpoint loads.'''
        UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(A_ )

    def __UpperCamelCase( self ):
        '''Check attention outputs (shape and count) with/without config flags.'''
        UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Dict = True
        UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ )
        UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ )

        def check_decoder_attentions_output(A_ ):
            # Decoder attentions appear in the second half of the outputs tuple.
            UpperCamelCase : Optional[Any] = len(A_ )
            self.assertEqual(out_len % 2 , 0 )
            UpperCamelCase : Any = outputs.decoder_attentions
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(A_ ):
            UpperCamelCase : Dict = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            UpperCamelCase : Union[str, Any] = True
            UpperCamelCase : List[Any] = False
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            UpperCamelCase : List[str] = len(A_ )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            if self.is_encoder_decoder:
                UpperCamelCase : int = model_class(A_ )
                UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) )
                self.assertEqual(config.output_hidden_states , A_ )
                check_decoder_attentions_output(A_ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            UpperCamelCase : Tuple = True
            UpperCamelCase : int = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            # Check attention is always last and order is fine
            UpperCamelCase : Optional[int] = True
            UpperCamelCase : List[str] = True
            UpperCamelCase : Optional[int] = model_class(A_ )
            UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) )
            self.assertEqual(model.config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
@require_tf
class A__ ( unittest.TestCase ):
    """Integration test: run the pretrained ConvBERT base model on a tiny input
    and compare the output against a stored reference slice.

    Fixes the machine-obfuscated original, where results were bound to the
    local ``UpperCamelCase`` but read back via the undefined name ``A_``.
    """

    @slow
    def __UpperCamelCase( self ):
        """Check output shape and the top-left 3x3 logits slice of `YituTech/conv-bert-base`."""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # Hidden size of conv-bert-base is 768; one batch of six tokens.
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 38
| 0
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
class A__ ( __snake_case ):
    """Lightning module fine-tuning a transformer for token classification (NER, POS, ...).

    NOTE(review): machine-obfuscated — most locals are bound to ``UpperCamelCase``
    but later read under their original names (``hparams``, ``features``,
    ``inputs`` ...). Restore real names before executing.
    """

    # Mode string passed to the BaseTransformer constructor.
    _UpperCAmelCase :Optional[int] = 'token-classification'

    def __init__( self , A_ ):
        '''Resolve the TokenClassificationTask subclass named by ``hparams.task_type``
        (looked up in the local ``tasks`` module) and initialise the base model.'''
        if type(A_ ) == dict:
            UpperCamelCase : Any = Namespace(**A_ )
        UpperCamelCase : Tuple = import_module("tasks" )
        try:
            UpperCamelCase : Dict = getattr(A_ , hparams.task_type )
            UpperCamelCase : TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
        UpperCamelCase : List[Any] = self.token_classification_task.get_labels(hparams.labels )
        # Label id that the loss (and metric computation below) must skip.
        UpperCamelCase : Any = CrossEntropyLoss().ignore_index
        super().__init__(A_ , len(self.labels ) , self.mode )

    def __UpperCamelCase( self , **A_ ):
        '''Forward pass: delegate to the wrapped transformers model.'''
        return self.model(**A_ )

    def __UpperCamelCase( self , A_ , A_ ):
        '''Training step: build the model inputs from a batch and return the loss.'''
        UpperCamelCase : Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            UpperCamelCase : str = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don"t use token_type_ids
        UpperCamelCase : List[str] = self(**A_ )
        UpperCamelCase : Optional[int] = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def __UpperCamelCase( self ):
        '''Convert each split's examples to features once and cache them on disk.'''
        UpperCamelCase : List[str] = self.hparams
        for mode in ["train", "dev", "test"]:
            UpperCamelCase : str = self._feature_file(A_ )
            if os.path.exists(A_ ) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s" , A_ )
                UpperCamelCase : Union[str, Any] = torch.load(A_ )
            else:
                logger.info("Creating features from dataset file at %s" , args.data_dir )
                UpperCamelCase : int = self.token_classification_task.read_examples_from_file(args.data_dir , A_ )
                UpperCamelCase : Optional[Any] = self.token_classification_task.convert_examples_to_features(
                    A_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=A_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("Saving features into cached file %s" , A_ )
                torch.save(A_ , A_ )

    def __UpperCamelCase( self , A_ , A_ , A_ = False ):
        '''Build a DataLoader over the cached features for one split.'''
        UpperCamelCase : Optional[Any] = self._feature_file(A_ )
        logger.info("Loading features from cached file %s" , A_ )
        UpperCamelCase : str = torch.load(A_ )
        UpperCamelCase : Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        UpperCamelCase : List[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            UpperCamelCase : List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            # Models without token_type_ids get a zero placeholder per example.
            UpperCamelCase : str = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        UpperCamelCase : Tuple = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(A_ , A_ , A_ , A_ ) , batch_size=A_ )

    def __UpperCamelCase( self , A_ , A_ ):
        '''Validation step: return loss, raw logits and gold label ids for one batch.'''
        UpperCamelCase : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            UpperCamelCase : List[Any] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don"t use token_type_ids
        UpperCamelCase : Optional[Any] = self(**A_ )
        UpperCamelCase : Tuple = outputs[:2]
        UpperCamelCase : Optional[Any] = logits.detach().cpu().numpy()
        UpperCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def __UpperCamelCase( self , A_ ):
        '''Aggregate step outputs: decode predictions (skipping padding) and compute seqeval metrics.'''
        UpperCamelCase : Optional[int] = torch.stack([x["val_loss"] for x in outputs] ).mean()
        UpperCamelCase : List[str] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        UpperCamelCase : List[str] = np.argmax(A_ , axis=2 )
        UpperCamelCase : Union[str, Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
        UpperCamelCase : List[str] = dict(enumerate(self.labels ) )
        UpperCamelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
        UpperCamelCase : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                # Skip positions carrying the ignored (padding) label id.
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        UpperCamelCase : Union[str, Any] = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(A_ , A_ ),
            "precision": precision_score(A_ , A_ ),
            "recall": recall_score(A_ , A_ ),
            "f1": fa_score(A_ , A_ ),
        }
        UpperCamelCase : Tuple = dict(results.items() )
        UpperCamelCase : int = results
        return ret, preds_list, out_label_list

    def __UpperCamelCase( self , A_ ):
        '''Validation epoch end: expose the aggregated metrics to Lightning.'''
        UpperCamelCase : str = self._eval_end(A_ )
        UpperCamelCase : Dict = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def __UpperCamelCase( self , A_ ):
        '''Test epoch end: same aggregation, renamed for the test loop.'''
        UpperCamelCase : List[str] = self._eval_end(A_ )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        UpperCamelCase : Tuple = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def __UpperCamelCase( A_ , A_ ):
        '''Add the token-classification CLI options on top of the base-transformer ones.'''
        BaseTransformer.add_model_specific_args(A_ , A_ )
        parser.add_argument(
            "--task_type" , default="NER" , type=A_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
        parser.add_argument(
            "--max_seq_length" , default=128 , type=A_ , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--labels" , default="" , type=A_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
        parser.add_argument(
            "--gpus" , default=0 , type=A_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
if __name__ == "__main__":
    # Build the CLI: generic Lightning/Transformers args plus NER-specific ones.
    # NOTE(review): obfuscated — results are bound to ``__lowerCamelCase`` but
    # read back as ``parser``/``args``/``model``/``trainer``/``checkpoints``.
    __lowerCamelCase : Tuple = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    __lowerCamelCase : Union[str, Any] = NERTransformer.add_model_specific_args(parser, os.getcwd())
    __lowerCamelCase : Tuple = parser.parse_args()
    # Train the token-classification model.
    __lowerCamelCase : Any = NERTransformer(args)
    __lowerCamelCase : Tuple = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        # Evaluate on the test set with the newest checkpoint.
        __lowerCamelCase : List[Any] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
        __lowerCamelCase : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 708
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __snake_case ):
    """Configuration class for CamemBERT (a RoBERTa-style French language model).

    Fixes the machine-obfuscated original, whose parameters were all named
    ``A_`` (duplicate parameter names are a SyntaxError) and whose values were
    bound to a throwaway local instead of ``self.*`` attributes.
    """

    # Model-type identifier (obfuscated attribute name kept for compatibility).
    _UpperCAmelCase :Union[str, Any] = 'camembert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the hyper-parameters; special-token ids go to the base config."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( __snake_case ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCamelCase( self ):
        """Return an OrderedDict mapping each input name to its dynamic axes.

        Multiple-choice tasks carry an extra ``choice`` axis between batch and
        sequence; every other task uses (batch, sequence).

        Fix: the original bound the axes dict to the obfuscated local
        ``UpperCamelCase`` but read it back as ``dynamic_axis`` (NameError).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 38
| 0
|
from ..utils import DummyObject, requires_backends
# Dummy placeholder classes: instantiating one, or calling either classmethod
# (originally `from_config` / `from_pretrained`), raises an informative error
# explaining that the `torch` backend is required.
# Fix: parameters were `*A_ , **A_` — duplicate parameter names are a
# SyntaxError — now `*args, **kwargs`.
class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :str = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Any = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Dict = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Optional[Any] = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :int = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :str = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Optional[Any] = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :int = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Optional[int] = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Tuple = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Dict = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )
# Dummy module-level functions: calling one raises an informative error stating
# that `torch` is required.
# Fixes: parameters were `*_lowerCAmelCase , **_lowerCAmelCase` — duplicate
# parameter names are a SyntaxError — now `*args, **kwargs`; the object reported
# to `requires_backends` is the function itself (matching the class placeholders).
def A_ ( *args , **kwargs ) -> Tuple:
    """Placeholder: raise because the `torch` backend is required."""
    requires_backends(A_ , ["torch"] )


def A_ ( *args , **kwargs ) -> Tuple:
    """Placeholder: raise because the `torch` backend is required."""
    requires_backends(A_ , ["torch"] )


def A_ ( *args , **kwargs ) -> Union[str, Any]:
    """Placeholder: raise because the `torch` backend is required."""
    requires_backends(A_ , ["torch"] )


def A_ ( *args , **kwargs ) -> Union[str, Any]:
    """Placeholder: raise because the `torch` backend is required."""
    requires_backends(A_ , ["torch"] )


def A_ ( *args , **kwargs ) -> Any:
    """Placeholder: raise because the `torch` backend is required."""
    requires_backends(A_ , ["torch"] )


def A_ ( *args , **kwargs ) -> List[str]:
    """Placeholder: raise because the `torch` backend is required."""
    requires_backends(A_ , ["torch"] )


def A_ ( *args , **kwargs ) -> Optional[int]:
    """Placeholder: raise because the `torch` backend is required."""
    requires_backends(A_ , ["torch"] )
# Dummy placeholder classes (continued): same `torch`-required pattern.
# Fix: `*A_ , **A_` duplicate parameter names (SyntaxError) -> `*args, **kwargs`.
class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :int = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Tuple = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :str = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Any = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Union[str, Any] = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Optional[int] = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :int = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Optional[int] = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :str = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Tuple = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Dict = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :Dict = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )


class A__ ( metaclass=__snake_case ):
    """Placeholder raising an informative error when `torch` is missing."""

    _UpperCAmelCase :str = ['torch']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )

    @classmethod
    def __UpperCamelCase( cls , *args , **kwargs ):
        requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Any = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Any = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :int = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[int] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :int = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Dict = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Dict = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Dict = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
| 709
|
def A_ ( input_a , input_b ) -> int:
    """NOR gate: return 1 when both inputs are 0, otherwise 0.

    Fixes over the original: both parameters were declared with the same
    name (`_lowerCAmelCase`), which is a SyntaxError in Python, and the
    body compared one input against itself instead of against the other.
    """
    # NOR truth table: output is asserted only when neither input is.
    return int(input_a == input_b == 0)
def A_ ( ) -> None:
    """Print the truth table of a two-input NOR gate.

    Fix: the original body called a module-level ``nor_gate`` that is not
    defined anywhere in this file (NameError at runtime); the gate is now
    a self-contained local helper, with the printed output unchanged.
    """

    def _nor(input_a: int, input_b: int) -> int:
        # NOR: 1 only when both inputs are 0.
        return int(input_a == input_b == 0)

    print("Truth Table of NOR Gate:" )
    print("| Input 1 | Input 2 | Output |" )
    print(F"""| 0 | 0 | {_nor(0 , 0 )} |""" )
    print(F"""| 0 | 1 | {_nor(0 , 1 )} |""" )
    print(F"""| 1 | 0 | {_nor(1 , 0 )} |""" )
    print(F"""| 1 | 1 | {_nor(1 , 1 )} |""" )
if __name__ == "__main__":
    # Script entry point: run any embedded doctests, then print the table.
    import doctest
    doctest.testmod()
    # NOTE(review): ``main`` is not defined under that name in this file as
    # shown (the truth-table printer above is ``A_``) — confirm the intended
    # binding before running.
    main()
| 38
| 0
|
'''simple docstring'''
def A_ ( ) -> int:
    """Project Euler 40: product of the digits d1*d10*d100*...*d1000000 of
    Champernowne's constant 0.123456789101112...

    Fixes over the original: the body read and wrote an undefined name
    (`_lowerCAmelCase`) where the local list and loop counter were meant;
    those references are restored so the function actually runs.
    """
    parts: list = []
    i = 1
    # Appending 1_000_000 whole numbers guarantees at least 1_000_000
    # digits, since every entry contributes one character or more.
    while len(parts) < 1_000_000:
        parts.append(str(i))
        i += 1
    constant = "".join(parts)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[9_9999])
        * int(constant[99_9999])
    )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined under that name in this file
    # as shown (the Champernowne function above is ``A_``) — confirm the
    # intended binding before running.
    print(solution())
| 710
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __snake_case ):
    # Processor pairing a BLIP image processor with an auto-resolved
    # tokenizer (mirrors the BLIP/InstructBLIP processor pattern).
    _UpperCAmelCase :Optional[int] = ['image_processor', 'tokenizer']
    _UpperCAmelCase :Tuple = 'BlipImageProcessor'
    _UpperCAmelCase :Optional[int] = 'AutoTokenizer'
    def __init__( self , A_ , A_ ):
        '''Wire up the backing image processor and tokenizer.'''
        # NOTE(review): this binds a throwaway local to False; in the
        # upstream processor this position disables a tokenizer flag —
        # confirm what was intended before relying on it.
        UpperCamelCase : str = False
        super().__init__(A_ , A_ )
        UpperCamelCase : str = self.image_processor
    def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ):
        '''Tokenize ``text`` and/or preprocess ``images`` into model inputs.

        Raises:
            ValueError: when neither images nor text is provided.
        '''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            UpperCamelCase : int = self.tokenizer
            UpperCamelCase : Optional[int] = self.tokenizer(
                text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
            # NOTE(review): ``text_encoding`` is read here but the tokenizer
            # result above was bound to a different (mangled) name — verify
            # the intended local before running this path.
            return text_encoding
        # add pixel_values
        UpperCamelCase : int = self.image_processor(A_ , return_tensors=A_ )
        if text is not None:
            UpperCamelCase : Dict = self.tokenizer(
                text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
        else:
            UpperCamelCase : Dict = None
        # NOTE(review): same mangling issue — ``text_encoding`` /
        # ``encoding_image_processor`` are read but never bound above.
        if text_encoding is not None:
            encoding_image_processor.update(A_ )
        return encoding_image_processor
    def __UpperCamelCase( self , *A_ , **A_ ):
        '''Forward all arguments to the tokenizer's ``batch_decode``.'''
        return self.tokenizer.batch_decode(*A_ , **A_ )
    def __UpperCamelCase( self , *A_ , **A_ ):
        '''Forward all arguments to the tokenizer's ``decode``.'''
        return self.tokenizer.decode(*A_ , **A_ )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def __UpperCamelCase( self ):
        '''Union of tokenizer and image-processor input names, order-preserving
        and deduplicated via ``dict.fromkeys``.'''
        UpperCamelCase : List[str] = self.tokenizer.model_input_names
        UpperCamelCase : int = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 38
| 0
|
'''simple docstring'''
from __future__ import annotations
def A_ ( limit: int ) -> list[int]:
    """Return all primes strictly below ``limit`` (sieve of Eratosthenes).

    Fixes/improvements over the original:
    * guards ``limit`` < 3 — the original unconditionally indexed
      ``is_prime[2]`` and crashed with IndexError for limit <= 2;
    * skips candidates already crossed out (only sieves from primes);
    * starts crossing out at i*i — smaller multiples were already removed
      by smaller primes.
    """
    if limit < 3:
        # 2 is the only candidate below 3 and 2 < limit never holds here.
        return []
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(3, int(limit**0.5 + 1), 2):
        if not is_prime[i]:
            continue
        for index in range(i * i, limit, i):
            is_prime[index] = False
    # 2 is prime; only odd indices need checking afterwards.
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def A_ ( ceiling: int = 1_000_000 ) -> int:
    """Project Euler 50: the prime below ``ceiling`` expressible as the sum
    of the most consecutive primes.

    Fixes over the original: the loop read an undefined ``ceiling`` name,
    called ``len()`` on the integer argument instead of the prime list, and
    relied on an undefined ``prime_sieve``; a private sieve is embedded so
    the function is self-contained, and primality membership uses a set
    (O(1)) instead of scanning a list.
    """

    def _prime_sieve(limit: int) -> list:
        # Sieve of Eratosthenes: primes strictly below `limit`.
        if limit < 3:
            return []
        is_prime = [True] * limit
        is_prime[0] = is_prime[1] = False
        for i in range(3, int(limit**0.5 + 1), 2):
            if is_prime[i]:
                for index in range(i * i, limit, i):
                    is_prime[index] = False
        return [2] + [i for i in range(3, limit, 2) if is_prime[i]]

    primes = _prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership tests in the hot loop
    length = 0
    largest = 0
    for i in range(len(primes)):
        # Only runs longer than the best so far can improve the answer.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined under that name in this file
    # as shown (the Euler-50 function above is ``A_``) — confirm the
    # intended binding before running.
    print(f"""{solution() = }""")
| 711
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
_UpperCAmelCase :Tuple = ['audio_values', 'audio_mask']
def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ):
'''simple docstring'''
super().__init__(
feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , )
UpperCamelCase : Optional[int] = spectrogram_length
UpperCamelCase : Dict = num_channels
UpperCamelCase : Optional[Any] = patch_size
UpperCamelCase : str = feature_size // self.patch_size[1]
UpperCamelCase : List[str] = n_fft
UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate
UpperCamelCase : Optional[int] = sampling_rate
UpperCamelCase : int = padding_value
UpperCamelCase : str = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = spectrogram(
A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
UpperCamelCase : List[Any] = log_spec[:, :-1]
UpperCamelCase : Optional[int] = log_spec - 20.0
UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
UpperCamelCase : str = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A_ ):
UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
UpperCamelCase : List[str] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
UpperCamelCase : str = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa )
# convert into correct format for padding
UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
UpperCamelCase : List[str] = padded_audio_features * self.padding_value
for i in range(len(A_ ) ):
UpperCamelCase : Union[str, Any] = audio_features[i]
UpperCamelCase : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
UpperCamelCase : int = {"audio_values": padded_audio_features}
UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ )
return encoded_inputs
| 38
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = tempfile.mkdtemp()
UpperCamelCase : int = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCamelCase : Tuple = {
"do_resize": True,
"size": {"height": 224, "width": 224},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
"do_convert_rgb": True,
}
UpperCamelCase : int = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A_ , A_ )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **A_ )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase : List[str] = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.get_tokenizer()
UpperCamelCase : List[str] = self.get_rust_tokenizer()
UpperCamelCase : List[Any] = self.get_image_processor()
UpperCamelCase : Tuple = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
UpperCamelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase : Any = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
UpperCamelCase : List[str] = self.get_image_processor(do_normalize=A_ )
UpperCamelCase : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=A_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.get_image_processor()
UpperCamelCase : Tuple = self.get_tokenizer()
UpperCamelCase : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : Optional[int] = self.prepare_image_inputs()
UpperCamelCase : Tuple = image_processor(A_ , return_tensors="np" )
UpperCamelCase : List[str] = processor(images=A_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.get_image_processor()
UpperCamelCase : Dict = self.get_tokenizer()
UpperCamelCase : Tuple = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : Any = "Alexandra,T-shirt的价格是15便士。"
UpperCamelCase : List[str] = processor(text=A_ )
UpperCamelCase : Optional[int] = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.get_image_processor()
UpperCamelCase : Optional[int] = self.get_tokenizer()
UpperCamelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : List[Any] = "Alexandra,T-shirt的价格是15便士。"
UpperCamelCase : Union[str, Any] = self.prepare_image_inputs()
UpperCamelCase : List[str] = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.get_image_processor()
UpperCamelCase : Optional[int] = self.get_tokenizer()
UpperCamelCase : str = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase : Any = processor.batch_decode(A_ )
UpperCamelCase : Optional[int] = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.get_image_processor()
UpperCamelCase : int = self.get_tokenizer()
UpperCamelCase : List[str] = ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : Any = "Alexandra,T-shirt的价格是15便士。"
UpperCamelCase : Tuple = self.prepare_image_inputs()
UpperCamelCase : int = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 712
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar("""KT""")
__lowerCamelCase : Dict = TypeVar("""VT""")
class A__ ( Generic[KT, VT] ):
    """A skip-list node: a key/value pair plus per-level forward links."""

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        # NOTE(review): the original declared both parameters as ``A_``
        # (a SyntaxError); they are restored to key/value here.
        self.key = key
        self.value = value
        # forward[i] is the successor node at level i (empty for the tail).
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        """Readable representation, e.g. ``Node(root: None)``."""
        return F"""Node({self.key}: {self.value})"""

    @property
    def level(self) -> int:
        """Number of levels this node participates in.

        Restored to the name ``level`` — the skip-list code in this file
        reads ``node.level``, but the original property was left with a
        mangled name, which would raise AttributeError.
        """
        return len(self.forward)
class A__ ( Generic[KT, VT] ):
def __init__( self , A_ = 0.5 , A_ = 16 ):
'''simple docstring'''
UpperCamelCase : Node[KT, VT] = Node[KT, VT]()
UpperCamelCase : List[Any] = 0
UpperCamelCase : Union[str, Any] = p
UpperCamelCase : List[str] = max_level
def __str__( self ):
'''simple docstring'''
UpperCamelCase : int = list(self )
if len(A_ ) == 0:
return F"""SkipList(level={self.level})"""
UpperCamelCase : str = max((len(str(A_ ) ) for item in items) , default=4 )
UpperCamelCase : Dict = max(A_ , 4 ) + 4
UpperCamelCase : str = self.head
UpperCamelCase : List[Any] = []
UpperCamelCase : int = node.forward.copy()
lines.append(F"""[{node.key}]""".ljust(A_ , "-" ) + "* " * len(A_ ) )
lines.append(" " * label_size + "| " * len(A_ ) )
while len(node.forward ) != 0:
UpperCamelCase : Union[str, Any] = node.forward[0]
lines.append(
F"""[{node.key}]""".ljust(A_ , "-" )
+ " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
lines.append(" " * label_size + "| " * len(A_ ) )
UpperCamelCase : Tuple = node.forward
lines.append("None".ljust(A_ ) + "* " * len(A_ ) )
return F"""SkipList(level={self.level})\n""" + "\n".join(A_ )
def __iter__( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
UpperCamelCase : Union[str, Any] = node.forward[0]
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = []
UpperCamelCase : List[Any] = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
UpperCamelCase : str = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(A_ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : str = self._locate_node(A_ )
if node is not None:
for i, update_node in enumerate(A_ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
UpperCamelCase : Tuple = node.forward[i]
else:
UpperCamelCase : List[Any] = update_node.forward[:i]
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self._locate_node(A_ )
if node is not None:
UpperCamelCase : Union[str, Any] = value
else:
UpperCamelCase : Dict = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , A_ ):
update_vector.append(self.head )
UpperCamelCase : Optional[int] = level
UpperCamelCase : Dict = Node(A_ , A_ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(A_ )
else:
UpperCamelCase : List[Any] = new_node
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Union[str, Any] = self._locate_node(A_ )
if node is not None:
return node.value
return None
def A_ ( ) -> List[Any]:
    """Insert four keys and check all are reachable via level-0 links.

    Fix: the original bound `skip_list`/`node`/`all_values` to the dead name
    `UpperCamelCase`, raising NameError on first use.
    """
    skip_list = SkipList()
    skip_list.insert("Key1" , 3 )
    skip_list.insert("Key2" , 12 )
    skip_list.insert("Key3" , 41 )
    skip_list.insert("Key4" , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def A_ ( ) -> List[Any]:
    """Re-inserting an existing key must override its stored value.

    Fix: locals were bound to the dead name `UpperCamelCase` in the original.
    """
    skip_list = SkipList()
    skip_list.insert("Key1" , 10 )
    skip_list.insert("Key1" , 12 )
    skip_list.insert("Key5" , 7 )
    skip_list.insert("Key7" , 10 )
    skip_list.insert("Key10" , 5 )
    skip_list.insert("Key7" , 7 )
    skip_list.insert("Key5" , 5 )
    skip_list.insert("Key10" , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def A_ ( ) -> List[Any]:
    """Searching an empty list returns None.

    Fix: the list was bound to the dead name `UpperCamelCase` in the original.
    """
    skip_list = SkipList()
    assert skip_list.find("Some key" ) is None
def A_ ( ) -> Tuple:
    """find() returns the latest value for present keys, None otherwise.

    Fix: the list was bound to the dead name `UpperCamelCase` in the original.
    """
    skip_list = SkipList()
    skip_list.insert("Key2" , 20 )
    assert skip_list.find("Key2" ) == 20
    skip_list.insert("Some Key" , 10 )
    skip_list.insert("Key2" , 8 )
    skip_list.insert("V" , 13 )
    assert skip_list.find("Y" ) is None
    assert skip_list.find("Key2" ) == 8
    assert skip_list.find("Some Key" ) == 10
    assert skip_list.find("V" ) == 13
def A_ ( ) -> Dict:
    """Deleting from an empty list must be a no-op.

    Fix: the list was bound to the dead name `UpperCamelCase` in the original.
    """
    skip_list = SkipList()
    skip_list.delete("Some key" )
    assert len(skip_list.head.forward ) == 0
def A_ ( ) -> Dict:
    """Deleted keys are no longer returned by find().

    Fix: the list was bound to the dead name `UpperCamelCase` in the original.
    """
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> List[str]:
    """delete() removes only the given key; other keys stay findable.

    Fix: the list was bound to the dead name `UpperCamelCase` in the original.
    """
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) == 14
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("X" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key1" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> List[Any]:
    """After delete, no dangling references to the removed node remain.

    Fix: the list was bound to the dead name `UpperCamelCase` and the inner
    generator's parameter was mangled to `_lowerCAmelCase` while its body read
    `node`.
    """
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 142 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("X" )
    def traverse_keys(node ):
        # Walk every node reachable from `node` through any forward reference.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
    assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def A_ ( ) -> Union[str, Any]:
    """Iteration must always yield keys in sorted order, before and after edits.

    Fix: the original bound the list to the dead name `UpperCamelCase`, passed
    the mangled parameter name to insert(), and the inner helper's parameter
    was mangled while its body read `lst`.
    """
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def A_ ( ) -> Tuple:
    """Run the whole skip-list test battery 100 times.

    NOTE(review): the test names called below are not defined in this file as
    written — every test def above was renamed to `A_` by the obfuscation —
    so each call would raise NameError; confirm against the upstream module.
    """
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def A_ ( ) -> List[str]:
    """Demo: build a list with repeated keys, delete one key, print the list.

    Fix: the original bound the list to the dead name `UpperCamelCase` and
    printed the (unbound) mangled parameter name instead of the list.
    """
    skip_list = SkipList()
    skip_list.insert(2 , "2" )
    skip_list.insert(4 , "4" )
    skip_list.insert(6 , "4" )
    skip_list.insert(4 , "5" )
    skip_list.insert(8 , "4" )
    skip_list.insert(9 , "4" )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined under that name in this file as
    # written (the defs above were renamed to `A_`) — confirm against upstream.
    main()
| 38
| 0
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class A__ :
    """Test helper that builds small MegatronBert configs plus dummy inputs and
    asserts the output shapes of each task head.

    NOTE(review): this chunk appears machine-mangled — `__init__` declares
    every parameter as `A_` (duplicate parameter names are a SyntaxError),
    every method below is named `__UpperCamelCase` (later defs shadow earlier
    ones), and assignments bind the literal name `UpperCamelCase` rather than
    the attributes/locals read afterwards.  Docstrings describe the evident
    intent; confirm real names against the upstream HuggingFace test file.
    """
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=64 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
        '''Record tester hyperparameters (batch/seq sizes, model dims, vocab sizes, ...).'''
        UpperCamelCase : List[str] = parent
        UpperCamelCase : Union[str, Any] = batch_size
        UpperCamelCase : Union[str, Any] = seq_length
        UpperCamelCase : List[Any] = is_training
        UpperCamelCase : Optional[int] = use_input_mask
        UpperCamelCase : List[Any] = use_token_type_ids
        UpperCamelCase : int = use_labels
        UpperCamelCase : str = vocab_size
        UpperCamelCase : List[Any] = hidden_size
        UpperCamelCase : List[Any] = embedding_size
        UpperCamelCase : Dict = num_hidden_layers
        UpperCamelCase : str = num_attention_heads
        UpperCamelCase : Union[str, Any] = intermediate_size
        UpperCamelCase : Dict = hidden_act
        UpperCamelCase : Dict = hidden_dropout_prob
        UpperCamelCase : int = attention_probs_dropout_prob
        UpperCamelCase : Optional[Any] = max_position_embeddings
        UpperCamelCase : Tuple = type_vocab_size
        UpperCamelCase : int = type_sequence_label_size
        UpperCamelCase : Any = initializer_range
        UpperCamelCase : int = num_labels
        UpperCamelCase : List[str] = num_choices
        UpperCamelCase : Union[str, Any] = scope
    def __UpperCamelCase( self ):
        '''Build (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels).'''
        UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : Dict = None
        if self.use_input_mask:
            UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : List[Any] = None
        if self.use_token_type_ids:
            UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase : Tuple = None
        UpperCamelCase : int = None
        UpperCamelCase : Dict = None
        if self.use_labels:
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase : str = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCamelCase( self ):
        '''Build a small MegatronBertConfig (decoder mode) from the stored hyperparameters.'''
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check base model: last_hidden_state and pooler_output shapes.'''
        UpperCamelCase : Dict = MegatronBertModel(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Any = model(A_ , attention_mask=A_ , token_type_ids=A_ )
        UpperCamelCase : Optional[Any] = model(A_ , token_type_ids=A_ )
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check masked-LM head: logits over the vocabulary per position.'''
        UpperCamelCase : List[str] = MegatronBertForMaskedLM(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Tuple = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check causal-LM head: logits over the vocabulary per position.'''
        UpperCamelCase : int = MegatronBertForCausalLM(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : str = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check next-sentence-prediction head: binary logits per example.'''
        UpperCamelCase : Dict = MegatronBertForNextSentencePrediction(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Union[str, Any] = model(
            A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check pretraining heads: MLM prediction logits plus seq-relationship logits.'''
        UpperCamelCase : str = MegatronBertForPreTraining(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : List[Any] = model(
            A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , next_sentence_label=A_ , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check question-answering head: start/end logits per position.'''
        UpperCamelCase : Optional[Any] = MegatronBertForQuestionAnswering(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Union[str, Any] = model(
            A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check sequence-classification head: one logit row per example.'''
        UpperCamelCase : str = self.num_labels
        UpperCamelCase : Any = MegatronBertForSequenceClassification(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : List[str] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check token-classification head: per-token label logits.'''
        UpperCamelCase : str = self.num_labels
        UpperCamelCase : int = MegatronBertForTokenClassification(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : str = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check multiple-choice head: inputs tiled per choice, one logit per choice.'''
        UpperCamelCase : List[str] = self.num_choices
        UpperCamelCase : int = MegatronBertForMultipleChoice(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase : Optional[Any] = model(
            A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __UpperCamelCase( self ):
        '''Return (config, inputs_dict) in the format the common test mixin expects.'''
        UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
        (
        UpperCamelCase
        ) : str = config_and_inputs
        UpperCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    """MegatronBert model test suite wiring the tester above into the common
    model/pipeline test mixins.

    NOTE(review): method names below are mangled to `__UpperCamelCase` (later
    defs shadow earlier ones); upstream these are `setUp`, `test_config`,
    `test_megatron_bert_model`, etc. — confirm against the original file.
    """
    # All model classes exercised by the common tests (torch-only).
    _UpperCAmelCase :int = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task → model-class mapping used by the pipeline test mixin.
    _UpperCAmelCase :int = (
        {
            'feature-extraction': MegatronBertModel,
            'fill-mask': MegatronBertForMaskedLM,
            'question-answering': MegatronBertForQuestionAnswering,
            'text-classification': MegatronBertForSequenceClassification,
            'text-generation': MegatronBertForCausalLM,
            'token-classification': MegatronBertForTokenClassification,
            'zero-shot': MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase :List[Any] = True
    # test_resize_embeddings = False
    _UpperCAmelCase :Optional[Any] = False
    def __UpperCamelCase( self , A_ , A_ , A_=False ):
        '''Extend the common input prep with dummy labels for pretraining-mapped models.'''
        UpperCamelCase : Any = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
        if return_labels:
            if model_class in get_values(A_ ):
                UpperCamelCase : Optional[Any] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A_ )
                UpperCamelCase : Dict = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=A_ )
        return inputs_dict
    def __UpperCamelCase( self ):
        '''Create the model tester and a ConfigTester for MegatronBertConfig.'''
        UpperCamelCase : List[str] = MegatronBertModelTester(self )
        UpperCamelCase : str = ConfigTester(self , config_class=A_ , hidden_size=37 )
    def __UpperCamelCase( self ):
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()
    def __UpperCamelCase( self ):
        '''Check the base model outputs.'''
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*A_ )
    def __UpperCamelCase( self ):
        '''Check the masked-LM head.'''
        UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A_ )
    def __UpperCamelCase( self ):
        '''Check the multiple-choice head.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A_ )
    def __UpperCamelCase( self ):
        '''Check the next-sentence-prediction head.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A_ )
    def __UpperCamelCase( self ):
        '''Check the pretraining heads.'''
        UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*A_ )
    def __UpperCamelCase( self ):
        '''Check the question-answering head.'''
        UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*A_ )
    def __UpperCamelCase( self ):
        '''Check the sequence-classification head.'''
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A_ )
    def __UpperCamelCase( self ):
        '''Check the token-classification head.'''
        UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*A_ )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Build a ``dtype=torch.long`` tensor on the test device from nested ints.

    Fix: the original passed the data argument to ``device=`` as well (the
    mangled parameter name replaced the intended ``torch_device``, imported
    from ``transformers.testing_utils`` at the top of this file), which would
    raise at runtime.
    """
    return torch.tensor(_lowerCAmelCase , dtype=torch.long , device=torch_device )
# Tolerance used for the element-wise closeness check in the integration test.
__lowerCamelCase : int = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
    """Slow integration test spot-checking MegatronBert-345M hidden states
    against known-good values (skipped because the checkpoint is not public).

    NOTE(review): assignments below bind the mangled name `UpperCamelCase`
    instead of the locals later read — confirm against the upstream file.
    """
    @slow
    @unittest.skip("Model is not available." )
    def __UpperCamelCase( self ):
        '''Run the fp16 model on a fixed input and compare a 3x3 output patch.'''
        UpperCamelCase : List[Any] = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            UpperCamelCase : Any = os.path.join(os.environ["MYDIR"] , A_ )
        UpperCamelCase : str = MegatronBertModel.from_pretrained(A_ )
        model.to(A_ )
        model.half()
        UpperCamelCase : Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            UpperCamelCase : str = model(A_ )[0]
        UpperCamelCase : Optional[Any] = torch.Size((1, 9, 1024) )
        self.assertEqual(output.shape , A_ )
        UpperCamelCase : List[str] = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
        for ii in range(3 ):
            for jj in range(3 ):
                UpperCamelCase : List[str] = output[0, ii, jj]
                UpperCamelCase : int = expected[3 * ii + jj]
                UpperCamelCase : int = "ii={} jj={} a={} b={}".format(A_ , A_ , A_ , A_ )
                self.assertTrue(math.isclose(A_ , A_ , rel_tol=A_ , abs_tol=A_ ) , msg=A_ )
| 713
|
from PIL import Image
def A_ ( _lowerCAmelCase ) -> Image:
    """Binarize a grayscale PIL image in place at its mean intensity.

    Pixels strictly above the mean become 255, the rest become 0; the same
    (mutated) image object is returned.

    Fix: the original iterated ``range(_lowerCAmelCase)`` — i.e. over the
    image object itself, a TypeError — because the obfuscation destroyed the
    ``width``/``height`` locals unpacked from ``image.size``.
    """
    width, height = _lowerCAmelCase.size
    pixels = _lowerCAmelCase.load()
    # First pass: accumulate the integer mean intensity.
    mean = 0
    for x in range(width ):
        for y in range(height ):
            mean += pixels[x, y]
    mean //= width * height
    # Second pass: threshold every pixel against the mean.
    for x in range(width ):
        for y in range(height ):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return _lowerCAmelCase
if __name__ == "__main__":
    # NOTE(review): `mean_threshold` is not defined under that name in this
    # file as written (the def above was renamed to `A_`), and the result is
    # bound to a mangled name while `image` is read — confirm against upstream.
    __lowerCamelCase : Union[str, Any] = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
| 38
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class A__ ( __snake_case ):
    """Trainer callback that records the name of every event it receives into
    ``self.events`` (used to compare against the expected event sequence).

    NOTE(review): every handler below shares the mangled name
    `__UpperCamelCase`, so only the last def survives on the class; upstream
    these are `on_init_end`, `on_train_begin`, ..., `on_prediction_step` —
    confirm against the original test file.  `__init__` also binds the dead
    name `UpperCamelCase` instead of `self.events`.
    """
    def __init__( self ):
        '''Start with an empty event log.'''
        UpperCamelCase : Any = []
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_init_end event.'''
        self.events.append("on_init_end" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_train_begin event.'''
        self.events.append("on_train_begin" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_train_end event.'''
        self.events.append("on_train_end" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_epoch_begin event.'''
        self.events.append("on_epoch_begin" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_epoch_end event.'''
        self.events.append("on_epoch_end" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_step_begin event.'''
        self.events.append("on_step_begin" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_step_end event.'''
        self.events.append("on_step_end" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_evaluate event.'''
        self.events.append("on_evaluate" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_predict event.'''
        self.events.append("on_predict" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_save event.'''
        self.events.append("on_save" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_log event.'''
        self.events.append("on_log" )
    def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
        '''Record the on_prediction_step event.'''
        self.events.append("on_prediction_step" )
@require_torch
class A__ ( unittest.TestCase ):
    """Tests for the Trainer callback mechanism: default callback set,
    add/pop/remove, and the exact sequence of events fired during training.

    NOTE(review): methods below are mangled to `__UpperCamelCase` (upstream:
    `setUp`, `tearDown`, `get_trainer`, `check_callbacks_equality`,
    `get_expected_events`, `test_*`), and many assignments bind the dead name
    `UpperCamelCase` instead of the locals read afterwards — confirm against
    the original test file.
    """
    def __UpperCamelCase( self ):
        '''Create a temporary output directory for each test.'''
        UpperCamelCase : Any = tempfile.mkdtemp()
    def __UpperCamelCase( self ):
        '''Remove the temporary output directory.'''
        shutil.rmtree(self.output_dir )
    def __UpperCamelCase( self , A_=0 , A_=0 , A_=64 , A_=64 , A_=None , A_=False , **A_ ):
        '''Build a small regression Trainer with the given datasets/args/callbacks.'''
        UpperCamelCase : Dict = RegressionDataset(length=A_ )
        UpperCamelCase : int = RegressionDataset(length=A_ )
        UpperCamelCase : List[Any] = RegressionModelConfig(a=A_ , b=A_ )
        UpperCamelCase : Any = RegressionPreTrainedModel(A_ )
        UpperCamelCase : Any = TrainingArguments(self.output_dir , disable_tqdm=A_ , report_to=[] , **A_ )
        return Trainer(
            A_ , A_ , train_dataset=A_ , eval_dataset=A_ , callbacks=A_ , )
    def __UpperCamelCase( self , A_ , A_ ):
        '''Assert the two callback lists are equal up to ordering (classes vs instances).'''
        self.assertEqual(len(A_ ) , len(A_ ) )
        # Order doesn't matter
        UpperCamelCase : Optional[int] = sorted(A_ , key=lambda A_ : cb.__name__ if isinstance(A_ , A_ ) else cb.__class__.__name__ )
        UpperCamelCase : Optional[Any] = sorted(A_ , key=lambda A_ : cb.__name__ if isinstance(A_ , A_ ) else cb.__class__.__name__ )
        for cba, cba in zip(A_ , A_ ):
            if isinstance(A_ , A_ ) and isinstance(A_ , A_ ):
                self.assertEqual(A_ , A_ )
            elif isinstance(A_ , A_ ) and not isinstance(A_ , A_ ):
                self.assertEqual(A_ , cba.__class__ )
            elif not isinstance(A_ , A_ ) and isinstance(A_ , A_ ):
                self.assertEqual(cba.__class__ , A_ )
            else:
                self.assertEqual(A_ , A_ )
    def __UpperCamelCase( self , A_ ):
        '''Compute the full event sequence a training run should emit for this trainer.'''
        UpperCamelCase : Any = ["on_init_end", "on_train_begin"]
        UpperCamelCase : int = 0
        UpperCamelCase : str = len(trainer.get_eval_dataloader() )
        UpperCamelCase : Tuple = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append("on_epoch_begin" )
            for _ in range(A_ ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log" )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save" )
            expected_events.append("on_epoch_end" )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def __UpperCamelCase( self ):
        '''Default callbacks are installed; init-time and tqdm-related callbacks behave.'''
        UpperCamelCase : Union[str, Any] = self.get_trainer()
        UpperCamelCase : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
        # Callbacks passed at init are added to the default callbacks
        UpperCamelCase : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(A_ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        UpperCamelCase : List[str] = self.get_trainer(disable_tqdm=A_ )
        UpperCamelCase : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
    def __UpperCamelCase( self ):
        '''add/pop/remove callbacks works both by class and by instance.'''
        UpperCamelCase : Optional[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        UpperCamelCase : Any = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(A_ )
        expected_callbacks.remove(A_ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
        UpperCamelCase : List[Any] = self.get_trainer()
        UpperCamelCase : Optional[int] = trainer.pop_callback(A_ )
        self.assertEqual(cb.__class__ , A_ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
        trainer.add_callback(A_ )
        expected_callbacks.insert(0 , A_ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
        # We can also add, pop, or remove by instance
        UpperCamelCase : Dict = self.get_trainer()
        UpperCamelCase : int = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(A_ )
        expected_callbacks.remove(A_ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
        UpperCamelCase : List[Any] = self.get_trainer()
        UpperCamelCase : Tuple = trainer.callback_handler.callbacks[0]
        UpperCamelCase : Optional[Any] = trainer.pop_callback(A_ )
        self.assertEqual(A_ , A_ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
        trainer.add_callback(A_ )
        expected_callbacks.insert(0 , A_ )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
    def __UpperCamelCase( self ):
        '''Training emits exactly the expected event flow for several log/save/eval configs.'''
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore" , category=A_ )
        UpperCamelCase : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        UpperCamelCase : List[str] = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A_ , self.get_expected_events(A_ ) )
        # Independent log/save/eval
        UpperCamelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        UpperCamelCase : Any = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A_ , self.get_expected_events(A_ ) )
        UpperCamelCase : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        UpperCamelCase : int = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A_ , self.get_expected_events(A_ ) )
        UpperCamelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
        trainer.train()
        UpperCamelCase : str = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A_ , self.get_expected_events(A_ ) )
        UpperCamelCase : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
        trainer.train()
        UpperCamelCase : str = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A_ , self.get_expected_events(A_ ) )
        # A bit of everything
        UpperCamelCase : Optional[Any] = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
        trainer.train()
        UpperCamelCase : List[Any] = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(A_ , self.get_expected_events(A_ ) )
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
            UpperCamelCase : Dict = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(A_ ) in warn_mock.call_args[0][0]
| 714
|
from math import loga
def A_ ( _lowerCAmelCase ) -> int:
    """Return the zero-based index of the lowest set bit of a non-negative int.

    Returns 0 for input 0 (kept from the original contract).

    Raises:
        TypeError: when the input is not an int.
        ValueError: when the input is negative.

    Fix: the original wrote ``isinstance(a, a)`` — isinstance's second argument
    must be a type, so every non-negative int raised TypeError — and imported
    the nonexistent name ``loga`` from ``math``.  ``(a & -a).bit_length() - 1``
    computes ``int(log2(a & -a))`` exactly, with no math import needed.
    """
    if not isinstance(_lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if _lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    return 0 if (_lowerCAmelCase == 0) else (_lowerCAmelCase & -_lowerCAmelCase).bit_length() - 1
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 38
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Any = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 715
|
from __future__ import annotations
__lowerCamelCase : Optional[int] = """Muhammad Umer Farooq"""
__lowerCamelCase : Tuple = """MIT"""
__lowerCamelCase : Optional[int] = """1.0.0"""
__lowerCamelCase : int = """Muhammad Umer Farooq"""
__lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me"""
__lowerCamelCase : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class A__ ( __snake_case ):
    """HTML parser that collects unique absolute hyperlinks under a base domain.

    NOTE(review): upstream this subclasses ``html.parser.HTMLParser`` and the
    handler below is named ``handle_starttag``; here the names are mangled,
    and ``__init__`` binds the dead name `UpperCamelCase` while reading
    ``domain`` (the parameter is mangled to ``A_``) — confirm against the
    upstream script before relying on it.
    """
    def __init__( self , A_ ):
        '''Initialize with the base domain used to absolutize relative hrefs.'''
        super().__init__()
        # urls accumulates every unique absolute URL seen so far.
        UpperCamelCase : list[str] = []
        UpperCamelCase : str = domain
    def __UpperCamelCase( self , A_ , A_ ):
        '''Collect the href of every non-empty, non-fragment <a> tag, absolutized.'''
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        UpperCamelCase : Any = parse.urljoin(self.domain , A_ )
                        self.urls.append(A_ )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the registrable domain (last two dot-separated labels) of a URL.

    e.g. ``https://sub.example.com/path`` → ``example.com``.

    Fix: the original called ``get_sub_domain_name``, a name that no longer
    exists in this file (the helper def was renamed), which raised NameError;
    the netloc extraction is now inlined via ``urllib.parse``.
    """
    netloc = parse.urlparse(_lowerCAmelCase ).netloc
    return ".".join(netloc.split("." )[-2:] )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the network location (host[:port]) component of the given URL."""
    parsed = parse.urlparse(_lowerCAmelCase )
    return parsed.netloc
def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]:
    """Crawl the page at *_lowerCAmelCase*, follow the links found on it and
    collect every e-mail address on that domain (intended result: a sorted,
    de-duplicated list).

    NOTE(review): an automated rename broke this body — every assignment
    targets the throw-away name ``UpperCamelCase``, so ``get_domain_name``,
    ``Parser``, ``parser``, ``r``, ``valid_emails``, ``read``, ``emails`` and
    the final ``sorted(...)`` argument are all undefined (NameError at
    runtime). Restore the original bindings before relying on this function.
    """
    UpperCamelCase : int = get_domain_name(_lowerCAmelCase )
    # Initialize the parser
    UpperCamelCase : str = Parser(_lowerCAmelCase )
    try:
        # Open URL
        UpperCamelCase : int = requests.get(_lowerCAmelCase )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        UpperCamelCase : Optional[Any] = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                UpperCamelCase : Optional[Any] = requests.get(_lowerCAmelCase )
                # Get the valid email.
                UpperCamelCase : Optional[int] = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(_lowerCAmelCase )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(_lowerCAmelCase )
if __name__ == "__main__":
    # NOTE(review): ``emails_from_url`` and ``emails`` are undefined in this
    # rewritten file (the crawler above is named ``A_`` and its result is
    # bound to ``__lowerCamelCase``); running this entry point raises
    # NameError.
    __lowerCamelCase : Tuple = emails_from_url("""https://github.com""")
    print(f"""{len(emails)} emails found:""")
    print("""\n""".join(sorted(emails)))
| 38
| 0
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__lowerCamelCase : str = None
try:
import msvcrt
except ImportError:
__lowerCamelCase : str = None
try:
import fcntl
except ImportError:
__lowerCamelCase : List[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
__lowerCamelCase : str = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
__lowerCamelCase : Union[str, Any] = """3.0.12"""
__lowerCamelCase : Any = None
def A_ ( ) -> logging.Logger:
    """Return the module's lazily created logger.

    Fix: the original bound the logger to a throw-away local and returned the
    (possibly never initialised) global ``_logger``. ``logging.getLogger``
    already caches one instance per name, so the create-once semantics are
    preserved while also tolerating a missing global.
    """
    global _logger
    if globals().get("_logger" ) is None:
        _logger = logging.getLogger(__name__ )
    return _logger
class A__ ( __snake_case ):
    """Raised when a file lock could not be acquired within the timeout.

    NOTE(review): the base class ``__snake_case`` is project-defined
    (``TimeoutError`` in the upstream filelock module) — confirm.
    """

    def __init__( self , A_ ):
        '''Remember which lock file the timeout happened on.'''
        # Fix: the original stored the argument in a throw-away local,
        # leaving ``self.lock_file`` (read by ``__str__``) undefined.
        self.lock_file = A_
        return None

    def __str__( self ):
        '''Human-readable description of the failed acquisition.'''
        temp = F"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class A__ :
    """Context-manager proxy returned by a lock's ``acquire()``.

    Using the proxy in a ``with`` statement releases the wrapped lock on
    exit, while plain ``acquire()``/``release()`` calls keep working.
    """

    def __init__( self , A_ ):
        '''Wrap *A_*, a lock object exposing ``release()``.'''
        # Fix: the original bound the lock to a throw-away local, leaving
        # ``self.lock`` (used by __enter__/__exit__) undefined.
        self.lock = A_
        return None

    def __enter__( self ):
        '''Return the wrapped lock itself.'''
        return self.lock

    def __exit__( self , exc_type , exc_value , traceback ):
        '''Release the wrapped lock; exceptions propagate (returns None).

        Fix: the original declared the same parameter name three times,
        which is a SyntaxError.
        '''
        self.lock.release()
        return None
class A__ :
    """Base class implementing the re-entrant file-lock protocol (upstream
    ``filelock.BaseFileLock``): a thread-safe nesting counter, a polling
    ``acquire`` loop, and platform hooks ``_acquire``/``_release``.

    NOTE(review): an automated rename broke this class — ``__init__`` declares
    the same parameter name three times (a SyntaxError), every assignment
    targets the throw-away name ``UpperCamelCase`` (so ``self._lock_file``,
    ``self._timeout``, ``self._thread_lock``, ``self._lock_counter``,
    ``lock_id``, ``lock_filename``, ``start_time`` etc. are undefined at
    use), all properties share one mangled name, and ``logger``/``Timeout``/
    ``_Acquire_ReturnProxy`` refer to renamed-away siblings. The structure is
    preserved below exactly as found; only documentation was added.
    """
    def __init__( self , A_ , A_=-1 , A_=None ):
        '''Create a lock on a path, with a default *timeout* and an optional
        maximum filename length (defaults to 255).'''
        UpperCamelCase : List[Any] = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        UpperCamelCase : Dict = self.hash_filename_if_too_long(A_ , A_ )
        # The path to the lock file.
        UpperCamelCase : List[Any] = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        UpperCamelCase : Tuple = None
        # The default timeout value.
        UpperCamelCase : Optional[Any] = timeout
        # We use this lock primarily for the lock counter.
        UpperCamelCase : Union[str, Any] = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        UpperCamelCase : Dict = 0
        return None
    @property
    def __UpperCamelCase( self ):
        '''Path of the lock file.'''
        return self._lock_file
    @property
    def __UpperCamelCase( self ):
        '''Default acquisition timeout in seconds (-1 = wait forever).'''
        return self._timeout
    @timeout.setter
    def __UpperCamelCase( self , A_ ):
        '''Set the default timeout (coerced to float).'''
        UpperCamelCase : Dict = float(A_ )
        return None
    def __UpperCamelCase( self ):
        '''Platform hook: try to take the OS-level lock (non-blocking).'''
        raise NotImplementedError()
    def __UpperCamelCase( self ):
        '''Platform hook: release the OS-level lock.'''
        raise NotImplementedError()
    @property
    def __UpperCamelCase( self ):
        '''True while this object holds the OS-level lock.'''
        return self._lock_file_fd is not None
    def __UpperCamelCase( self , A_=None , A_=0.05 ):
        '''Acquire the lock, polling every *poll_intervall* seconds until
        *timeout* expires; returns a context-manager proxy.'''
        if timeout is None:
            UpperCamelCase : Optional[Any] = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        UpperCamelCase : Dict = id(self )
        UpperCamelCase : List[str] = self._lock_file
        UpperCamelCase : int = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(A_ )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                UpperCamelCase : List[Any] = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def __UpperCamelCase( self , A_=False ):
        '''Decrement the nesting counter; release the OS lock when it reaches
        zero (or immediately when *force* is true).'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    UpperCamelCase : List[Any] = id(self )
                    UpperCamelCase : Dict = self._lock_file
                    logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
                    self._release()
                    UpperCamelCase : Dict = 0
                    logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
        return None
    def __enter__( self ):
        '''Acquire with default settings and return self.'''
        self.acquire()
        return self
    def __exit__( self , A_ , A_ , A_ ):
        '''Release once; exceptions propagate.'''
        self.release()
        return None
    def __del__( self ):
        '''Best-effort forced release at garbage collection time.'''
        self.release(force=A_ )
        return None
    def __UpperCamelCase( self , A_ , A_ ):
        '''Shorten over-long lock filenames by truncating and appending a
        hash, preserving the directory part.'''
        UpperCamelCase : Tuple = os.path.basename(A_ )
        if len(A_ ) > max_length and max_length > 0:
            UpperCamelCase : Optional[int] = os.path.dirname(A_ )
            UpperCamelCase : int = str(hash(A_ ) )
            UpperCamelCase : Any = filename[: max_length - len(A_ ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(A_ , A_ )
        else:
            return path
class A__ ( __snake_case ):
    """Windows implementation of the file lock, based on ``msvcrt.locking``.

    NOTE(review): mangled like its base class — duplicate ``__init__``
    parameters (SyntaxError) and throw-away assignment targets leave
    ``fd``/``self._lock_file_fd`` bindings broken; structure preserved as
    found, documentation only added.
    """
    def __init__( self , A_ , A_=-1 , A_=None ):
        '''Prefix the lock path with ``\\\\?\\`` to lift the MAX_PATH limit.'''
        from .file_utils import relative_to_absolute_path
        super().__init__(A_ , timeout=A_ , max_filename_length=A_ )
        UpperCamelCase : List[Any] = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
    def __UpperCamelCase( self ):
        '''Non-blocking OS acquire: open the lock file and try to lock one
        byte; on success remember the file descriptor.'''
        UpperCamelCase : Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            UpperCamelCase : str = os.open(self._lock_file , A_ )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(A_ )
            else:
                UpperCamelCase : Optional[Any] = fd
        return None
    def __UpperCamelCase( self ):
        '''Unlock the byte, close the descriptor and best-effort remove the
        lock file.'''
        UpperCamelCase : List[Any] = self._lock_file_fd
        UpperCamelCase : str = None
        msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 )
        os.close(A_ )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class A__ ( __snake_case ):
    """Unix implementation of the file lock, based on ``fcntl.flock``.

    NOTE(review): mangled like its base class — duplicate ``__init__``
    parameters (SyntaxError) and throw-away assignment targets; structure
    preserved as found, documentation only added.
    """
    def __init__( self , A_ , A_=-1 , A_=None ):
        '''Cap the lock filename length at the filesystem's ``f_namemax``.'''
        UpperCamelCase : Tuple = os.statvfs(os.path.dirname(A_ ) ).f_namemax
        super().__init__(A_ , timeout=A_ , max_filename_length=A_ )
    def __UpperCamelCase( self ):
        '''Non-blocking OS acquire: open the lock file and try an exclusive
        ``flock``; on success remember the file descriptor.'''
        UpperCamelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        UpperCamelCase : int = os.open(self._lock_file , A_ )
        try:
            fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(A_ )
        else:
            UpperCamelCase : List[str] = fd
        return None
    def __UpperCamelCase( self ):
        '''Unlock and close the descriptor (the lock file itself is kept).'''
        UpperCamelCase : str = self._lock_file_fd
        UpperCamelCase : List[Any] = None
        fcntl.flock(A_ , fcntl.LOCK_UN )
        os.close(A_ )
        return None
class A__ ( __snake_case ):
    """Portable fallback lock: the mere existence of the lock file (created
    with ``O_EXCL``) is the lock.

    NOTE(review): mangled like its base class — throw-away assignment targets
    leave ``fd``/``self._lock_file_fd`` bindings broken; structure preserved
    as found, documentation only added.
    """
    def __UpperCamelCase( self ):
        '''Try to create the lock file exclusively; on success remember the
        file descriptor.'''
        UpperCamelCase : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            UpperCamelCase : Optional[int] = os.open(self._lock_file , A_ )
        except OSError:
            pass
        else:
            UpperCamelCase : Tuple = fd
        return None
    def __UpperCamelCase( self ):
        '''Close the descriptor and best-effort delete the lock file.'''
        os.close(self._lock_file_fd )
        UpperCamelCase : str = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Pick the platform-appropriate FileLock implementation.
# NOTE(review): the automated rewrite renamed all three lock classes to
# ``A__``, so ``WindowsFileLock``/``UnixFileLock``/``SoftFileLock`` are
# undefined here and this dispatch raises NameError at import time.
__lowerCamelCase : Dict = None
if msvcrt:
    __lowerCamelCase : Any = WindowsFileLock
elif fcntl:
    __lowerCamelCase : Any = UnixFileLock
else:
    __lowerCamelCase : int = SoftFileLock
    if warnings is not None:
        warnings.warn("""only soft file lock is available""")
| 716
|
from __future__ import annotations
def A_ ( _lowerCAmelCase ) -> list[int]:
    """Return all primes strictly below *_lowerCAmelCase* using an odd-only
    sieve of Eratosthenes.

    Fix: the original bound every intermediate to the throw-away name
    ``UpperCamelCase``, so ``is_prime``, ``index`` and ``primes`` were
    undefined at use; it also crashed on limits below 3. The sieve marks the
    odd multiples of every odd ``i`` up to ``sqrt(limit)``.

    >>> A_(10)
    [2, 3, 5, 7]
    """
    limit = _lowerCAmelCase
    if limit < 3:
        # No primes below 2; only 2 itself is below 3.
        return [] if limit <= 2 else [2]
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def A_ ( _lowerCAmelCase = 100_0000 ) -> int:
    """Project Euler 50: the prime below *_lowerCAmelCase* that can be written
    as the sum of the most consecutive primes.

    Fix: the original called an undefined ``prime_sieve`` (the sibling was
    renamed away) and referenced undefined ``primes``/``length``/``ceiling``/
    ``sol``/``largest`` locals. The sieve is now a private local helper and
    the search state is bound to real names; membership testing uses a set
    instead of a list scan.
    """
    ceiling = _lowerCAmelCase

    def _sieve(limit: int) -> list[int]:
        # Odd-only sieve of Eratosthenes; primes strictly below limit.
        if limit < 3:
            return [] if limit <= 2 else [2]
        is_prime = [True] * limit
        for i in range(3, int(limit**0.5 + 1), 2):
            index = i * 2
            while index < limit:
                is_prime[index] = False
                index += i
        return [2] + [i for i in range(3, limit, 2) if is_prime[i]]

    primes = _sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of O(n) list scans
    length = 0
    largest = 0
    for i in range(len(primes)):
        # Only runs at least as long as the best found so far can win.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # NOTE(review): ``solution`` is undefined in this rewritten file (the
    # function above was renamed to ``A_``); running this entry point raises
    # NameError.
    print(f"""{solution() = }""")
| 38
| 0
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class A__ ( __snake_case ):
    """Processor wrapping an ``AutoTokenizer`` plus optional speaker-embedding
    voice presets (upstream: transformers' ``BarkProcessor``).

    NOTE(review): an automated rename broke this class — assignments target
    the throw-away name ``UpperCamelCase`` (so ``speaker_embeddings_path``,
    ``tmp_dict``, ``voice_preset_paths`` etc. are undefined at use), several
    methods declare duplicate parameter names (SyntaxErrors), and
    ``kwargs``/``voice_preset`` refer to renamed-away parameters. The
    structure is preserved below exactly as found; documentation only added.
    """
    # tokenizer_class / attributes / per-prompt expected ndarray ranks.
    _UpperCAmelCase :Optional[Any] = 'AutoTokenizer'
    _UpperCAmelCase :Tuple = ['tokenizer']
    _UpperCAmelCase :int = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }
    def __init__( self , A_ , A_=None ):
        '''Store the tokenizer (via the mixin) and the optional speaker
        embeddings dictionary.'''
        super().__init__(A_ )
        UpperCamelCase : List[Any] = speaker_embeddings
    @classmethod
    def __UpperCamelCase( cls , A_ , A_="speaker_embeddings_path.json" , **A_ ):
        '''Load the processor from a repo, optionally resolving the JSON
        dictionary of speaker-embedding file paths.'''
        if speaker_embeddings_dict_path is not None:
            UpperCamelCase : Any = get_file_from_repo(
                A_ , A_ , subfolder=kwargs.pop("subfolder" , A_ ) , cache_dir=kwargs.pop("cache_dir" , A_ ) , force_download=kwargs.pop("force_download" , A_ ) , proxies=kwargs.pop("proxies" , A_ ) , resume_download=kwargs.pop("resume_download" , A_ ) , local_files_only=kwargs.pop("local_files_only" , A_ ) , use_auth_token=kwargs.pop("use_auth_token" , A_ ) , revision=kwargs.pop("revision" , A_ ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F"""`{os.path.join(A_ , A_ )}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
                UpperCamelCase : Optional[int] = None
            else:
                with open(A_ ) as speaker_embeddings_json:
                    UpperCamelCase : Optional[Any] = json.load(A_ )
        else:
            UpperCamelCase : Any = None
        UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(A_ , **A_ )
        return cls(tokenizer=A_ , speaker_embeddings=A_ )
    def __UpperCamelCase( self , A_ , A_="speaker_embeddings_path.json" , A_="speaker_embeddings" , A_ = False , **A_ , ):
        '''Save the processor: dump every voice preset's arrays as ``.npy``
        files plus a JSON index, then delegate to the mixin's save.'''
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(A_ , A_ , "v2" ) , exist_ok=A_ )
            UpperCamelCase : Dict = {}
            UpperCamelCase : int = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    UpperCamelCase : Any = self._load_voice_preset(A_ )
                    UpperCamelCase : List[Any] = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"] , A_ , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=A_ , )
                        UpperCamelCase : Any = os.path.join(A_ , F"""{prompt_key}_{key}.npy""" )
                    UpperCamelCase : List[Any] = tmp_dict
            with open(os.path.join(A_ , A_ ) , "w" ) as fp:
                json.dump(A_ , A_ )
        super().save_pretrained(A_ , A_ , **A_ )
    def __UpperCamelCase( self , A_ = None , **A_ ):
        '''Resolve and load the three ``.npy`` arrays of a named voice
        preset from the repo; raises ValueError when a key is missing.'''
        UpperCamelCase : Optional[int] = self.speaker_embeddings[voice_preset]
        UpperCamelCase : Dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
            UpperCamelCase : int = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , A_ ) , cache_dir=kwargs.pop("cache_dir" , A_ ) , force_download=kwargs.pop("force_download" , A_ ) , proxies=kwargs.pop("proxies" , A_ ) , resume_download=kwargs.pop("resume_download" , A_ ) , local_files_only=kwargs.pop("local_files_only" , A_ ) , use_auth_token=kwargs.pop("use_auth_token" , A_ ) , revision=kwargs.pop("revision" , A_ ) , )
            if path is None:
                raise ValueError(
                    F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.""" )
            UpperCamelCase : Dict = np.load(A_ )
        return voice_preset_dict
    def __UpperCamelCase( self , A_ = None ):
        '''Validate that a voice-preset dict has all three prompt arrays with
        the expected ndarray ranks.'''
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
    def __call__( self , A_=None , A_=None , A_="pt" , A_=256 , A_=False , A_=True , A_=False , **A_ , ):
        '''Tokenize the text and, when given, resolve/validate the voice
        preset and attach it as ``history_prompt`` on the encoding.'''
        if voice_preset is not None and not isinstance(A_ , A_ ):
            if (
                isinstance(A_ , A_ )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                UpperCamelCase : Union[str, Any] = self._load_voice_preset(A_ )
            else:
                if isinstance(A_ , A_ ) and not voice_preset.endswith(".npz" ):
                    UpperCamelCase : Any = voice_preset + ".npz"
                UpperCamelCase : Optional[int] = np.load(A_ )
        if voice_preset is not None:
            self._validate_voice_preset_dict(A_ , **A_ )
            UpperCamelCase : Optional[int] = BatchFeature(data=A_ , tensor_type=A_ )
        UpperCamelCase : Tuple = self.tokenizer(
            A_ , return_tensors=A_ , padding="max_length" , max_length=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , add_special_tokens=A_ , **A_ , )
        if voice_preset is not None:
            UpperCamelCase : Tuple = voice_preset
        return encoded_text
| 717
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __snake_case ):
    """Dataset input stream backed by the ``Generator`` packaged builder
    (upstream: datasets' ``GeneratorDatasetInputStream``).

    NOTE(review): mangled by an automated rename — ``__init__`` declares the
    same parameter name many times (a SyntaxError) and ``self.builder`` /
    the download flags / ``dataset`` are never actually bound because every
    assignment targets the throw-away name ``UpperCamelCase``. Structure
    preserved as found; documentation only added.
    """
    def __init__( self , A_ , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , A_ = None , **A_ , ):
        '''Forward common reader options to the base class and build the
        ``Generator`` builder from the generator callable and its kwargs.'''
        super().__init__(
            features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , )
        UpperCamelCase : Optional[int] = Generator(
            cache_dir=A_ , features=A_ , generator=A_ , gen_kwargs=A_ , **A_ , )
    def __UpperCamelCase( self ):
        '''Return the ``train`` split: streaming when requested, otherwise
        download/prepare and materialise a map-style dataset.'''
        if self.streaming:
            UpperCamelCase : Optional[Any] = self.builder.as_streaming_dataset(split="train" )
        # Build regular (map-style) dataset
        else:
            UpperCamelCase : Union[str, Any] = None
            UpperCamelCase : Union[str, Any] = None
            UpperCamelCase : List[Any] = None
            UpperCamelCase : List[str] = None
            self.builder.download_and_prepare(
                download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , )
            UpperCamelCase : int = self.builder.as_dataset(
                split="train" , verification_mode=A_ , in_memory=self.keep_in_memory )
        return dataset
| 38
| 0
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def A_ ( _lowerCAmelCase ):
    """Translate a model test file path into its importable module path,
    e.g. ``tests/models/bert/test_modeling_bert.py`` ->
    ``tests.models.bert.test_modeling_bert``.

    Raises:
        ValueError: when the path is not of the expected
            ``tests/models/.../test_modeling_*.py`` form.

    Fix: the original stored every intermediate in the throw-away name
    ``UpperCamelCase``, so ``components``/``test_fn``/``test_module_path``
    (and the ``{test_file}`` placeholder in the error message) were all
    undefined at use.
    """
    components = _lowerCAmelCase.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            F"""{_lowerCAmelCase} instead.""" )
    test_fn = components[-1]
    if not test_fn.endswith("py" ):
        raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith("test_modeling_" ):
        raise ValueError(
            F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
    components = components[:-1] + [test_fn.replace(".py" , "" )]
    return ".".join(components )
# NOTE(review): the four helpers below are broken by the automated rename —
# they call siblings (``get_module_path``, ``get_test_module``,
# ``get_test_classes``) that no longer exist under those names, every
# assignment targets the throw-away ``UpperCamelCase``, and the sort lambdas
# reference an undefined ``x``. Structure preserved; documentation only added.
def A_ ( _lowerCAmelCase ):
    '''Import and return the test module for a given test file path.'''
    UpperCamelCase : Optional[Any] = get_module_path(_lowerCAmelCase )
    UpperCamelCase : Any = importlib.import_module(_lowerCAmelCase )
    return test_module
def A_ ( _lowerCAmelCase ):
    '''Collect the ``*ModelTester`` classes of a test module, sorted by
    class name.'''
    UpperCamelCase : int = []
    UpperCamelCase : Optional[Any] = get_test_module(_lowerCAmelCase )
    for attr in dir(_lowerCAmelCase ):
        if attr.endswith("ModelTester" ):
            tester_classes.append(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
    # sort with class names
    return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def A_ ( _lowerCAmelCase ):
    '''Collect the test classes of a module — attributes with a non-empty
    ``all_model_classes`` — sorted by class name.'''
    UpperCamelCase : Dict = []
    UpperCamelCase : Union[str, Any] = get_test_module(_lowerCAmelCase )
    for attr in dir(_lowerCAmelCase ):
        UpperCamelCase : Any = getattr(_lowerCAmelCase , _lowerCAmelCase )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        UpperCamelCase : List[Any] = getattr(_lowerCAmelCase , "all_model_classes" , [] )
        if len(_lowerCAmelCase ) > 0:
            test_classes.append(_lowerCAmelCase )
    # sort with class names
    return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def A_ ( _lowerCAmelCase ):
    '''Union of all model classes covered by a module's test classes,
    sorted by class name.'''
    UpperCamelCase : Tuple = get_test_classes(_lowerCAmelCase )
    UpperCamelCase : Dict = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def A_ ( _lowerCAmelCase ):
    """Instantiate *_lowerCAmelCase* (a test class), run its ``setUp`` if it
    has one, and return the class of its ``model_tester`` attribute — or
    ``None`` when the test defines no tester.

    Fix: the original instantiated the class into a throw-away name and then
    probed the class object itself plus an undefined ``test`` local.
    """
    test = _lowerCAmelCase()
    if hasattr(test , "setUp" ):
        test.setUp()
    model_tester = None
    if hasattr(test , "model_tester" ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def A_ ( test_file , model_class ):
    """Return the test classes of *test_file* (sorted by class name) whose
    ``all_model_classes`` contains *model_class*.

    Fix: the original declared the same parameter name twice (a SyntaxError)
    and appended/sorted undefined names.
    NOTE(review): ``get_test_classes`` is the sibling helper this file's
    automated rewrite renamed away; it is unresolved here — confirm wiring.
    """
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda cls : cls.__name__ )
# NOTE(review): the four helpers below are broken by the automated rename —
# the first declares the same parameter twice (a SyntaxError), all call
# siblings (``get_test_classes_for_model``, ``get_model_tester_from_test_class``,
# ``get_test_classes``, ``get_model_classes``, ``get_tester_classes_for_model``)
# that no longer exist under those names, and the returned names are never
# bound. Structure preserved; documentation only added.
def A_ ( _lowerCAmelCase , _lowerCAmelCase ):
    '''Tester classes (sorted by name) used by the test classes covering a
    given model class.'''
    UpperCamelCase : str = get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase )
    UpperCamelCase : str = []
    for test_class in test_classes:
        UpperCamelCase : List[str] = get_model_tester_from_test_class(_lowerCAmelCase )
        if tester_class is not None:
            tester_classes.append(_lowerCAmelCase )
    # sort with class names
    return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def A_ ( _lowerCAmelCase ):
    '''Map each test class of a module to its model tester class.'''
    UpperCamelCase : str = get_test_classes(_lowerCAmelCase )
    UpperCamelCase : str = {test_class: get_model_tester_from_test_class(_lowerCAmelCase ) for test_class in test_classes}
    return test_tester_mapping
def A_ ( _lowerCAmelCase ):
    '''Map each model class of a module to the test classes covering it.'''
    UpperCamelCase : str = get_model_classes(_lowerCAmelCase )
    UpperCamelCase : Tuple = {
        model_class: get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes
    }
    return model_test_mapping
def A_ ( _lowerCAmelCase ):
    '''Map each model class of a module to its tester classes.'''
    UpperCamelCase : int = get_model_classes(_lowerCAmelCase )
    UpperCamelCase : Tuple = {
        model_class: get_tester_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes
    }
    return model_to_tester_mapping
def A_ ( _lowerCAmelCase ):
    """Recursively convert *_lowerCAmelCase* into a JSON-friendly structure:
    strings pass through, classes become their names, lists/tuples and dicts
    are converted element-wise, and anything else is returned unchanged.

    Fix: the original tested ``isinstance(x, x)`` (the value against itself)
    on every branch — a TypeError for non-type arguments — and iterated
    undefined names; the intended str / type / sequence / dict dispatch is
    restored.
    """
    if isinstance(_lowerCAmelCase , str ):
        return _lowerCAmelCase
    elif isinstance(_lowerCAmelCase , type ):
        return _lowerCAmelCase.__name__
    elif isinstance(_lowerCAmelCase , (list, tuple) ):
        return [A_(x ) for x in _lowerCAmelCase]
    elif isinstance(_lowerCAmelCase , dict ):
        return {A_(k ): A_(v ) for k, v in _lowerCAmelCase.items()}
    else:
        return _lowerCAmelCase
| 718
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A_ ( _lowerCAmelCase ):  # picklable for multiprocessing
    """Return the sum of *_lowerCAmelCase*'s elements via its ``.sum()``
    method (used as a picklable worker for multiprocessing tests).

    Fix: the original returned ``x.sum()`` while the parameter is named
    ``_lowerCAmelCase``, so ``x`` was undefined.
    """
    return _lowerCAmelCase.sum()
def A_ ( _lowerCAmelCase ):  # picklable for multiprocessing
    """Return *_lowerCAmelCase* + 1 (trivial picklable worker for
    multiprocessing tests).

    Fix: the original returned ``i + 1`` while the parameter is named
    ``_lowerCAmelCase``, so ``i`` was undefined.
    """
    return _lowerCAmelCase + 1
@dataclass
class A__ :
    # NOTE(review): the automated rewrite renamed both fields to the same
    # identifier, so the second annotation overwrites the first and this
    # dataclass ends up with a single ``str`` field (upstream had ``x: int``
    # and ``y: str``).
    _UpperCAmelCase :int
    _UpperCAmelCase :str
class A__ ( __snake_case ):
    """Unit tests for ``datasets.utils.py_utils`` helpers.

    NOTE(review): an automated rename broke these tests — every fixture is
    assigned to the throw-away name ``UpperCamelCase`` and the assertions pass
    an undefined ``A_`` everywhere, so none of the intended
    (function, data, expected) triples survive. Structure preserved as found;
    documentation only added.
    """
    def __UpperCamelCase( self ):
        '''map_nested: identity mapping over scalars, lists, dicts and nested
        dicts, sequentially and with num_proc=2, plus numpy handling and the
        unpicklable-lambda failure case.'''
        UpperCamelCase : Optional[int] = {}
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Tuple = [1, 2]
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
        UpperCamelCase : Any = {"a": {"1": 1}, "b": 2}
        UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4}
        UpperCamelCase : Dict = {}
        UpperCamelCase : Any = []
        UpperCamelCase : Any = 2
        UpperCamelCase : Any = [2, 3]
        UpperCamelCase : Optional[Any] = {"a": 2, "b": 3}
        UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]}
        UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3}
        UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        UpperCamelCase : List[str] = 2
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        UpperCamelCase : int = {"a": 2, "b": 0, "c": 2}
        UpperCamelCase : Union[str, Any] = {
            "a": np.eye(2 ).astype(A_ ),
            "b": np.zeros(3 ).astype(A_ ),
            "c": np.ones(2 ).astype(A_ ),
        }
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(A_ ): # can't pickle a local lambda
            map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
    def __UpperCamelCase( self ):
        '''zip_dict: three dicts with the same keys are zipped into
        (key, value-tuple) pairs.'''
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : List[Any] = {"a": 3, "b": 4}
        UpperCamelCase : Tuple = {"a": 5, "b": 6}
        UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
    def __UpperCamelCase( self ):
        '''temporary_assignment: an attribute is swapped inside the context
        and restored afterwards.'''
        class A__ :
            _UpperCAmelCase :str = 'bar'
        UpperCamelCase : List[Any] = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(A_ , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
# Parametrized check that map_nested picks sequential vs multiprocessing
# execution based on iterable length / requested num_proc.
# NOTE(review): the automated rewrite gave the function three identical
# parameter names (a SyntaxError) and left ``x`` in the lambda undefined;
# structure preserved as found, documentation only added.
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    '''Assert that either the single-process path or the pool is used, and
    that the pool is sized as expected.'''
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        UpperCamelCase : Union[str, Any] = {F"""{i}""": i for i in range(_lowerCAmelCase )}
        UpperCamelCase : List[str] = map_nested(lambda _lowerCAmelCase : x + 10 , _lowerCAmelCase , num_proc=_lowerCAmelCase , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__ ( __snake_case ):
    """Tests for ``temp_seed``: two generations under the same seed must be
    equal, and a third one outside the context must differ.

    NOTE(review): broken by the automated rename — results are assigned to
    the throw-away name ``UpperCamelCase`` while the assertions compare
    undefined ``outa`` values, and ``A_`` is passed where booleans/arrays were
    intended. Structure preserved as found; documentation only added.
    """
    @require_tf
    def __UpperCamelCase( self ):
        '''temp_seed with set_tensorflow: reproducible TF random outputs.'''
        import tensorflow as tf
        from tensorflow.keras import layers
        UpperCamelCase : int = layers.Dense(2 )
        def gen_random_output():
            UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) )
            return model(A_ ).numpy()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : List[Any] = gen_random_output()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        UpperCamelCase : Optional[int] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    @require_torch
    def __UpperCamelCase( self ):
        '''temp_seed with set_pytorch: reproducible torch random outputs.'''
        import torch
        def gen_random_output():
            UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 )
            UpperCamelCase : Dict = torch.rand(1 , 3 )
            return model(A_ ).detach().numpy()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Optional[int] = gen_random_output()
        UpperCamelCase : List[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    def __UpperCamelCase( self ):
        '''temp_seed default: reproducible numpy random outputs.'''
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        UpperCamelCase : Optional[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
# NestedDataStructure should expose its input unchanged via ``.data``.
# NOTE(review): the rewrite left ``output_data``/``input_data`` undefined
# (the result is bound to ``UpperCamelCase`` and the parameter is
# ``_lowerCAmelCase``); structure preserved, documentation only added.
@pytest.mark.parametrize("input_data" , [{}] )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    UpperCamelCase : Optional[Any] = NestedDataStructure(_lowerCAmelCase ).data
    assert output_data == input_data
# NestedDataStructure.flatten should linearise arbitrarily nested
# lists/dicts into a flat list of leaves.
# NOTE(review): the rewrite left ``output``/``expected_output`` undefined
# (the result is bound to ``UpperCamelCase`` and the parameters were
# renamed); structure preserved, documentation only added.
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
    UpperCamelCase : Dict = NestedDataStructure(_lowerCAmelCase ).flatten()
    assert output == expected_output
def A_ ( ) -> List[Any]:
    """``asdict`` converts dataclasses (also nested inside dicts/lists) to
    plain dicts and raises on a non-dataclass argument.

    NOTE(review): broken by the automated rename — the fixtures are bound to
    the throw-away name ``UpperCamelCase`` while the assertions pass an
    undefined ``_lowerCAmelCase`` and compare against an undefined
    ``expected_output``. Structure preserved; documentation only added.
    """
    UpperCamelCase : str = A(x=1 , y="foobar" )
    UpperCamelCase : Tuple = {"x": 1, "y": "foobar"}
    assert asdict(_lowerCAmelCase ) == expected_output
    UpperCamelCase : List[str] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    UpperCamelCase : Tuple = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(_lowerCAmelCase ) == expected_output
    with pytest.raises(_lowerCAmelCase ):
        asdict([1, A(x=10 , y="foo" )] )
def A_ ( _lowerCAmelCase ) -> list:
    """Split *_lowerCAmelCase* on runs of whitespace (picklable worker for
    the multiprocessing tests below).

    Fix: the original split an undefined ``text`` (the parameter is
    ``_lowerCAmelCase``), and its return annotation claimed a tuple while
    ``str.split`` returns a list.
    """
    return _lowerCAmelCase.split()
def A_ ( _lowerCAmelCase ):
    """Yield ``(timestamp, payload)`` twice, two seconds apart — a slow
    generator used to check that results are consumed as soon as they are
    produced.

    Fix: the original yielded an undefined ``content`` (the parameter is
    ``_lowerCAmelCase``), and its ``-> Dict`` annotation was wrong for a
    generator, so the annotation is dropped.
    """
    yield (time.time(), _lowerCAmelCase)
    time.sleep(2 )
    yield (time.time(), _lowerCAmelCase)
def A_ ( ) -> str:
    """``iflatmap_unordered`` over stdlib and pathos pools should flatten all
    worker outputs and yield each item as soon as it is produced.

    NOTE(review): broken by the automated rename — results are bound to the
    throw-away ``UpperCamelCase`` while the assertions read an undefined
    ``out``, the pool argument is an undefined ``_lowerCAmelCase``, and the
    worker names (``_split_text``,
    ``_aseconds_generator_of_aitems_with_timing``) no longer exist under
    those names. Structure preserved; documentation only added.
    """
    with Pool(2 ) as pool:
        UpperCamelCase : List[str] = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(_lowerCAmelCase ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        UpperCamelCase : Dict = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(_lowerCAmelCase ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        UpperCamelCase : Any = []
        for yield_time, content in iflatmap_unordered(
            _lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(_lowerCAmelCase )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(_lowerCAmelCase ) == 4
| 38
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    # Fast, CPU-friendly tests for CycleDiffusionPipeline, built from tiny
    # randomly-initialised components via the shared pipeline-tester mixins.
    # NOTE(review): identifiers in this file look machine-mangled — the bare
    # `A_` arguments below are not defined in scope; verify against upstream.
    _UpperCAmelCase :str = CycleDiffusionPipeline
    _UpperCAmelCase :Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'negative_prompt',
        'height',
        'width',
        'negative_prompt_embeds',
    }
    _UpperCAmelCase :List[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
    _UpperCAmelCase :int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
    _UpperCAmelCase :str = IMAGE_TO_IMAGE_IMAGE_PARAMS
    _UpperCAmelCase :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def __UpperCamelCase( self ):
        '''Build tiny UNet / DDIM scheduler / VAE / CLIP components for fast tests.'''
        torch.manual_seed(0 )
        UpperCamelCase : Any = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        UpperCamelCase : Dict = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=A_ , set_alpha_to_one=A_ , )
        torch.manual_seed(0 )
        UpperCamelCase : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        UpperCamelCase : List[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        UpperCamelCase : Dict = CLIPTextModel(A_ )
        UpperCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCamelCase : Dict = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def __UpperCamelCase( self , A_ , A_=0 ):
        '''Build deterministic call kwargs (image, prompts, generator) for the pipeline.'''
        UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
        # shift the random tensor from [-1, 1] into [0, 1] image range
        UpperCamelCase : Dict = image / 2 + 0.5
        if str(A_ ).startswith("mps" ):
            # mps does not support device-bound generators
            UpperCamelCase : Dict = torch.manual_seed(A_ )
        else:
            UpperCamelCase : List[str] = torch.Generator(device=A_ ).manual_seed(A_ )
        UpperCamelCase : Tuple = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def __UpperCamelCase( self ):
        '''Full fp32 forward pass on CPU; compare a 3x3 corner slice to reference values.'''
        UpperCamelCase : List[Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase : Optional[int] = self.get_dummy_components()
        UpperCamelCase : List[Any] = CycleDiffusionPipeline(**A_ )
        UpperCamelCase : int = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : List[str] = self.get_dummy_inputs(A_ )
        UpperCamelCase : List[str] = pipe(**A_ )
        UpperCamelCase : Optional[Any] = output.images
        UpperCamelCase : Any = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        UpperCamelCase : Union[str, Any] = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def __UpperCamelCase( self ):
        '''Same forward pass with all components cast to fp16 (GPU only).'''
        UpperCamelCase : List[str] = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(A_ , "half" ):
                UpperCamelCase : List[Any] = module.half()
        UpperCamelCase : int = CycleDiffusionPipeline(**A_ )
        UpperCamelCase : Optional[Any] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Tuple = self.get_dummy_inputs(A_ )
        UpperCamelCase : str = pipe(**A_ )
        UpperCamelCase : Dict = output.images
        UpperCamelCase : Dict = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        UpperCamelCase : List[Any] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    # The overrides below only re-dispatch to the mixin implementations,
    # skipping on mps / non-deterministic configurations.
    @skip_mps
    def __UpperCamelCase( self ):
        '''Save/load round-trip (skipped on mps).'''
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline" )
    def __UpperCamelCase( self ):
        '''Batch-vs-single equivalence is skipped: pipeline is non-deterministic.'''
        return super().test_inference_batch_single_identical()

    @skip_mps
    def __UpperCamelCase( self ):
        '''Dict vs tuple output equivalence (skipped on mps).'''
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def __UpperCamelCase( self ):
        '''Save/load with optional components removed (skipped on mps).'''
        return super().test_save_load_optional_components()

    @skip_mps
    def __UpperCamelCase( self ):
        '''Attention-slicing forward pass (skipped on mps).'''
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    # Slow integration tests against the real CompVis/stable-diffusion-v1-4
    # checkpoint; both recolor a black car to blue and compare to stored
    # reference images. Requires a CUDA GPU.

    def __UpperCamelCase( self ):
        '''Release GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase( self ):
        '''fp16 checkpoint: cycle-diffuse the car image and compare to the fp16 reference.'''
        UpperCamelCase : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png" )
        UpperCamelCase : Any = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
        UpperCamelCase : Union[str, Any] = init_image.resize((512, 512) )
        UpperCamelCase : str = "CompVis/stable-diffusion-v1-4"
        UpperCamelCase : Union[str, Any] = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" )
        UpperCamelCase : Dict = CycleDiffusionPipeline.from_pretrained(
            A_ , scheduler=A_ , safety_checker=A_ , torch_dtype=torch.floataa , revision="fp16" )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        pipe.enable_attention_slicing()
        UpperCamelCase : List[str] = "A black colored car"
        UpperCamelCase : str = "A blue colored car"
        UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
        UpperCamelCase : Any = pipe(
            prompt=A_ , source_prompt=A_ , image=A_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=A_ , output_type="np" , )
        UpperCamelCase : Dict = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5e-1

    def __UpperCamelCase( self ):
        '''fp32 checkpoint: same pipeline run, tighter tolerance against the fp32 reference.'''
        UpperCamelCase : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png" )
        UpperCamelCase : Union[str, Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
        UpperCamelCase : Union[str, Any] = init_image.resize((512, 512) )
        UpperCamelCase : str = "CompVis/stable-diffusion-v1-4"
        UpperCamelCase : List[Any] = DDIMScheduler.from_pretrained(A_ , subfolder="scheduler" )
        UpperCamelCase : Optional[int] = CycleDiffusionPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=A_ )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        pipe.enable_attention_slicing()
        UpperCamelCase : List[Any] = "A black colored car"
        UpperCamelCase : Tuple = "A blue colored car"
        UpperCamelCase : List[str] = torch.manual_seed(0 )
        UpperCamelCase : List[str] = pipe(
            prompt=A_ , source_prompt=A_ , image=A_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=A_ , output_type="np" , )
        UpperCamelCase : Union[str, Any] = output.images
        assert np.abs(image - expected_image ).max() < 2e-2
| 719
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
    # Import-guard placeholder: every construction path raises a helpful
    # error (via requires_backends) when the `note_seq` package is missing.
    _UpperCAmelCase :Tuple = ['note_seq']

    def __init__( self , *A_ , **A_ ):
        '''Fail fast: instantiating this stub requires the `note_seq` backend.'''
        requires_backends(self , ["note_seq"] )

    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        '''Classmethod constructor stub — also requires `note_seq`.'''
        requires_backends(cls , ["note_seq"] )

    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        '''Classmethod constructor stub — also requires `note_seq`.'''
        requires_backends(cls , ["note_seq"] )
| 38
| 0
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A_ ( _lowerCAmelCase ):  # picklable for multiprocessing
    """Return ``.sum()`` of the argument (e.g. a NumPy array).

    Defined at module level so multiprocessing can pickle it. Fixes the
    original body, which referenced an undefined name ``x`` instead of the
    parameter (the ``-> Union[str, Any]`` annotation also referenced names
    not imported in this file, so it was dropped).
    """
    return _lowerCAmelCase.sum()
def A_ ( _lowerCAmelCase ):  # picklable for multiprocessing
    """Return the argument plus one.

    Module-level so it can be pickled. Fixes the original body, which used an
    undefined name ``i`` instead of the parameter (the unresolvable
    ``-> Optional[Any]`` annotation was dropped).
    """
    return _lowerCAmelCase + 1
@dataclass
class A__ :
    """Tiny record used by the ``asdict`` tests below.

    Fix: the original declared both fields under the same name
    (``_UpperCAmelCase``), so the second annotation silently replaced the
    first and the class effectively had a single field. Callers construct it
    as ``(x=..., y=...)``, so the fields are restored accordingly.
    """
    x: int
    y: str
class A__ ( __snake_case ):
    # Unit tests for datasets.utils.py_utils helpers: map_nested, zip_dict
    # and temporary_assignment.
    # NOTE(review): the bare `A_` arguments below are not defined in scope —
    # this looks machine-mangled; verify against upstream before relying on it.

    def __UpperCamelCase( self ):
        '''map_nested maps a function over scalars/lists/dicts, sequentially, with num_proc, and over numpy values.'''
        UpperCamelCase : Optional[int] = {}
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Tuple = [1, 2]
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
        UpperCamelCase : Any = {"a": {"1": 1}, "b": 2}
        UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4}
        # expected outputs: each value incremented by one
        UpperCamelCase : Dict = {}
        UpperCamelCase : Any = []
        UpperCamelCase : Any = 2
        UpperCamelCase : Any = [2, 3]
        UpperCamelCase : Optional[Any] = {"a": 2, "b": 3}
        UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]}
        UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3}
        UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
        # sequential mapping over every structure
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        # the same mappings with 2 worker processes must give identical results
        UpperCamelCase : List[str] = 2
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        # numpy arrays: map over them only when map_numpy is set
        UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        UpperCamelCase : int = {"a": 2, "b": 0, "c": 2}
        UpperCamelCase : Union[str, Any] = {
            "a": np.eye(2 ).astype(A_ ),
            "b": np.zeros(3 ).astype(A_ ),
            "c": np.ones(2 ).astype(A_ ),
        }
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(A_ ):  # can't pickle a local lambda
            map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )

    def __UpperCamelCase( self ):
        '''zip_dict zips the values of several dicts key-wise.'''
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : List[Any] = {"a": 3, "b": 4}
        UpperCamelCase : Tuple = {"a": 5, "b": 6}
        UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )

    def __UpperCamelCase( self ):
        '''temporary_assignment swaps an attribute inside the context and restores it after.'''
        # NOTE(review): the local class is instantiated as `Foo()` below but
        # declared as `A__` here — presumably renamed by the mangler; verify.
        class A__ :
            # class attribute that the context manager temporarily replaces
            _UpperCAmelCase :str = 'bar'
        UpperCamelCase : List[Any] = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(A_ , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def A_ ( iterable_length , num_proc , expected_num_proc ):
    """map_nested must dispatch to multiprocessing only when both the iterable
    length and num_proc reach parallel_min_length; otherwise it maps in-process.

    Fixes the original signature, which named all three parameters
    ``_lowerCAmelCase`` (a SyntaxError, and incompatible with the parametrize
    argnames), and the lambda, which referenced an undefined ``x``.
    """
    # Patch out the real work so only the dispatch decision is observed.
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data = {F"""{i}""": i for i in range(iterable_length )}
        map_nested(lambda x : x + 10 , data , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            # the pool must be created with the expected worker count
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__ ( __snake_case ):
    # Tests for temp_seed: inside the context the RNG (TF / PyTorch / NumPy)
    # must be reproducible; outside it must diverge again.
    # NOTE(review): the bare `A_` arguments below are not defined in scope —
    # machine-mangled; verify against upstream.

    @require_tf
    def __UpperCamelCase( self ):
        '''Same seed -> identical TF outputs; unseeded call -> different output.'''
        import tensorflow as tf
        from tensorflow.keras import layers
        UpperCamelCase : int = layers.Dense(2 )

        def gen_random_output():
            # fresh random input through the dense layer
            UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) )
            return model(A_ ).numpy()

        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : List[Any] = gen_random_output()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        UpperCamelCase : Optional[int] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    @require_torch
    def __UpperCamelCase( self ):
        '''Same seed -> identical PyTorch outputs; unseeded call -> different output.'''
        import torch

        def gen_random_output():
            # fresh random linear layer applied to a fresh random input
            UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 )
            UpperCamelCase : Dict = torch.rand(1 , 3 )
            return model(A_ ).detach().numpy()

        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Optional[int] = gen_random_output()
        UpperCamelCase : List[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    def __UpperCamelCase( self ):
        '''Same seed -> identical NumPy outputs; unseeded call -> different output.'''
        def gen_random_output():
            return np.random.rand(1 , 3 )

        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        UpperCamelCase : Optional[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def A_ ( input_data ):
    """``NestedDataStructure.data`` must round-trip the wrapped object unchanged.

    Fixes the original, whose parameter was named ``_lowerCAmelCase`` (so the
    parametrize argname ``input_data`` could not bind) and whose assertion
    referenced the undefined names ``output_data``/``input_data``.
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def A_ ( data , expected_output ):
    """``NestedDataStructure.flatten`` must flatten arbitrarily nested
    lists/dicts into a flat list of leaves.

    Fixes the original signature, which declared two parameters with the same
    name ``_lowerCAmelCase`` — a SyntaxError — and which therefore could not
    bind the parametrize argnames ``data``/``expected_output``.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def A_ ( ) -> List[Any]:
    '''asdict converts dataclass instances (including nested ones) to dicts and rejects non-dataclasses.'''
    # NOTE(review): `A` is not defined under that name in this file — it is
    # presumably the @dataclass declared above (renamed to `A__` by the
    # mangler); the `_lowerCAmelCase` arguments below are likewise undefined
    # in this zero-argument function. Verify against upstream.
    UpperCamelCase : str = A(x=1 , y="foobar" )
    UpperCamelCase : Tuple = {"x": 1, "y": "foobar"}
    assert asdict(_lowerCAmelCase ) == expected_output
    # nested dataclasses inside dicts and lists are converted recursively
    UpperCamelCase : List[str] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    UpperCamelCase : Tuple = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(_lowerCAmelCase ) == expected_output
    # a list (non-dataclass) at the top level must raise
    with pytest.raises(_lowerCAmelCase ):
        asdict([1, A(x=10 , y="foo" )] )
def A_ ( _lowerCAmelCase ):
    """Split a string on whitespace (picklable helper for the pool tests).

    Fixes the original body, which referenced an undefined name ``text``
    instead of the parameter (the unresolvable ``-> Tuple`` annotation was
    dropped).
    """
    return _lowerCAmelCase.split()
def A_ ( _lowerCAmelCase ):
    """Yield ``(timestamp, content)`` twice, two seconds apart.

    Used to verify that ``iflatmap_unordered`` forwards items as soon as they
    are yielded. Fixes the original body, which referenced an undefined name
    ``content`` instead of the parameter.
    """
    yield (time.time(), _lowerCAmelCase)
    time.sleep(2 )
    yield (time.time(), _lowerCAmelCase)
def A_ ( ):
    """End-to-end checks for ``iflatmap_unordered`` over both pool flavours.

    Fixes the original, which used the undefined name ``_lowerCAmelCase`` as
    the pool argument and whose latency assertion
    (``yield_time < time.time() + 0.1``) was vacuously true — an item is
    always yielded before it is received. The latency of each receive is now
    asserted instead.
    """
    # standard multiprocessing Pool
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            # each item must be received promptly after the worker yields it
            assert time.time() - yield_time < 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
| 720
|
import math
import tensorflow as tf
from packaging import version
def A_ ( _lowerCAmelCase ):
    """Gaussian Error Linear Unit via erf: ``x * Phi(x)`` (https://arxiv.org/abs/1606.08415).

    Fixes the original body, whose assignments went to throwaway names while
    the subsequent lines referenced the undefined ``x``/``cdf``.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    # Phi(x) = 0.5 * (1 + erf(x / sqrt(2))), cast to match x's dtype
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def A_ ( _lowerCAmelCase ):
    """Smoother GELU (tanh approximation), as used in GPT-2
    (https://arxiv.org/abs/1606.08415).

    Fixes the original body, whose assignments went to throwaway names while
    the formula referenced the undefined ``x``/``pi``/``coeff``.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044_715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def A_ ( _lowerCAmelCase ):
    """Mish activation: ``x * tanh(softplus(x))`` (https://arxiv.org/abs/1908.08681).

    Fixes the original body, which assigned the converted tensor to a
    throwaway name and then referenced the undefined ``x``.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def A_ ( _lowerCAmelCase ):
    """Fast tanh-based GELU approximation:
    ``0.5 * x * (1 + tanh(x * 0.7978845608 * (1 + 0.044715 * x * x)))``.

    Fixes the original body, whose assignments went to throwaway names while
    the formula referenced the undefined ``x`` and an ambiguous ``coeffa``;
    the coefficient placement follows the upstream ``gelu_fast``
    implementation (0.7978845608 ~= sqrt(2/pi)).
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeffa = tf.cast(0.044_715 , x.dtype )
    coeffa_ = tf.cast(0.7_978_845_608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffa_ * (1.0 + coeffa * x * x) ))
def A_ ( _lowerCAmelCase ):
    """Quick GELU approximation: ``x * sigmoid(1.702 * x)``.

    Fixes the original body, whose assignments went to throwaway names while
    the return referenced the undefined ``x``/``coeff``.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    '''GELU with the output clipped to [-10, 10] ("gelu_10").'''
    # NOTE(review): `_gelu` is not defined under that name in this file — the
    # erf-based GELU above was presumably renamed by the mangler; verify.
    return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 )
def A_ ( x , axis=-1 ):
    """Gated Linear Unit: split ``x`` in two halves along ``axis`` and return
    ``a * sigmoid(b)`` (https://arxiv.org/abs/1612.08083).

    Fixes the original signature, which declared both parameters with the same
    name ``_lowerCAmelCase`` (a SyntaxError), and the body, which unpacked the
    split into throwaway names, referenced the undefined ``a`` and applied the
    sigmoid to the whole input instead of the second half.
    """
    a, b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
# Prefer the native Keras gelu on TF >= 2.4; fall back to the manual
# implementations above otherwise.
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
    def A_ ( _lowerCAmelCase ) -> Any:
        '''Wrapper around Keras' gelu.'''
        # NOTE(review): passing the input tensor as `approximate=` looks like a
        # mangling artifact — upstream passes `approximate=True` here; verify.
        return tf.keras.activations.gelu(_lowerCAmelCase , approximate=_lowerCAmelCase )

    # NOTE(review): `approximate_gelu_wrap`, and the repeated rebinding of
    # `__lowerCamelCase`, look mangled — upstream binds `gelu` and `gelu_new`
    # to distinct names consumed by the ACT2FN mapping below; verify.
    __lowerCamelCase : Optional[int] = tf.keras.activations.gelu
    __lowerCamelCase : int = approximate_gelu_wrap
else:
    __lowerCamelCase : List[Any] = _gelu
    __lowerCamelCase : Optional[Any] = _gelu_new
# Name -> TF activation function mapping (ACT2FN).
# NOTE(review): the value identifiers (gelu, gelu_aa, gelu_fast, ...) are not
# bound under those names in this mangled file — all the functions above were
# renamed to `A_`; verify against upstream before use.
__lowerCamelCase : Any = {
    """gelu""": gelu,
    """gelu_10""": gelu_aa,
    """gelu_fast""": gelu_fast,
    """gelu_new""": gelu_new,
    """glu""": glu,
    """mish""": mish,
    """quick_gelu""": quick_gelu,
    """relu""": tf.keras.activations.relu,
    """sigmoid""": tf.keras.activations.sigmoid,
    """silu""": tf.keras.activations.swish,
    """swish""": tf.keras.activations.swish,
    """tanh""": tf.keras.activations.tanh,
}
def A_ ( _lowerCAmelCase ):
    """Look up a TF activation function by name in the ``ACTaFN`` mapping.

    Raises ``KeyError`` with the list of supported names when the requested
    activation is unknown. Fixes the original body, which referenced the
    undefined name ``activation_string`` instead of the parameter.
    """
    if _lowerCAmelCase in ACTaFN:
        return ACTaFN[_lowerCAmelCase]
    raise KeyError(F"""function {_lowerCAmelCase} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 38
| 0
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __snake_case , unittest.TestCase ):
    '''Test-suite for the Longformer tokenizer (byte-level BPE, Roberta-style), slow and fast variants.'''
    # NOTE(review): the bare `A_` arguments below are not defined in scope —
    # this file looks machine-mangled; verify against upstream.
    _UpperCAmelCase :Union[str, Any] = LongformerTokenizer
    _UpperCAmelCase :Optional[Any] = True
    _UpperCAmelCase :Any = LongformerTokenizerFast
    _UpperCAmelCase :List[str] = True

    def __UpperCamelCase( self ):
        '''Write a tiny BPE vocab and merges file into the test tmpdir.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCamelCase : List[Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        UpperCamelCase : List[Any] = dict(zip(A_ , range(len(A_ ) ) ) )
        UpperCamelCase : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        UpperCamelCase : Optional[Any] = {"unk_token": "<unk>"}
        UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(A_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(A_ ) )

    def __UpperCamelCase( self , **A_ ):
        '''Instantiate the slow tokenizer from the tmpdir fixture.'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ )

    def __UpperCamelCase( self , **A_ ):
        '''Instantiate the fast (Rust) tokenizer from the tmpdir fixture.'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A_ )

    def __UpperCamelCase( self , A_ ):
        '''Provide the (input, expected output) text pair used by the common tests.'''
        UpperCamelCase : Any = "lower newer"
        UpperCamelCase : str = "lower newer"
        return input_text, output_text

    def __UpperCamelCase( self ):
        '''Tokenize against the tiny fixture vocab and check tokens and ids.'''
        UpperCamelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        UpperCamelCase : str = "lower newer"
        UpperCamelCase : Tuple = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        UpperCamelCase : int = tokenizer.tokenize(A_ )  # , add_prefix_space=True)
        self.assertListEqual(A_ , A_ )
        UpperCamelCase : Optional[int] = tokens + [tokenizer.unk_token]
        UpperCamelCase : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )

    def __UpperCamelCase( self ):
        '''Check hard-coded id sequences for two reference sentences.'''
        UpperCamelCase : Optional[Any] = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=A_ ) , [0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=A_ ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )

    @slow
    def __UpperCamelCase( self ):
        '''build_inputs_with_special_tokens must match encode(add_special_tokens=True) for single and paired inputs.'''
        UpperCamelCase : Optional[int] = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
        UpperCamelCase : Union[str, Any] = tokenizer.encode("sequence builders" , add_special_tokens=A_ )
        UpperCamelCase : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=A_ )
        UpperCamelCase : Optional[Any] = tokenizer.encode(
            "sequence builders" , add_special_tokens=A_ , add_prefix_space=A_ )
        UpperCamelCase : Union[str, Any] = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=A_ , add_prefix_space=A_ )
        UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A_ )
        UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def __UpperCamelCase( self ):
        '''add_prefix_space and space handling around special tokens (notably the lstrip'd <mask>).'''
        UpperCamelCase : Optional[int] = self.get_tokenizer()
        UpperCamelCase : Dict = "Encode this sequence."
        UpperCamelCase : Optional[int] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
        # Testing encoder arguments
        UpperCamelCase : Optional[int] = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
        UpperCamelCase : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(A_ , A_ )
        UpperCamelCase : Union[str, Any] = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
        UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(A_ , A_ )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        UpperCamelCase : Dict = tokenizer.encode(A_ , add_special_tokens=A_ )
        UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(A_ , A_ )
        # Testing spaces after special tokens
        UpperCamelCase : Union[str, Any] = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(A_ , lstrip=A_ , rstrip=A_ )} )  # mask token has a left space
        UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(A_ )
        UpperCamelCase : Any = "Encode <mask> sequence"
        UpperCamelCase : Optional[int] = "Encode <mask>sequence"
        UpperCamelCase : List[Any] = tokenizer.encode(A_ )
        UpperCamelCase : Optional[int] = encoded.index(A_ )
        UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(A_ , A_ )
        UpperCamelCase : Dict = tokenizer.encode(A_ )
        UpperCamelCase : List[str] = encoded.index(A_ )
        UpperCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(A_ , A_ )

    def __UpperCamelCase( self ):
        '''Intentionally disabled common test (no-op override).'''
        pass

    def __UpperCamelCase( self ):
        '''Slow and fast tokenizers must agree on a sentence containing <mask>.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
                UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(A_ , **A_ )
                UpperCamelCase : Any = "A, <mask> AllenNLP sentence."
                UpperCamelCase : int = tokenizer_r.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
                UpperCamelCase : Any = tokenizer_p.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                UpperCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                UpperCamelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    A_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    A_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )

    def __UpperCamelCase( self ):
        '''add_prefix_space / trim_offsets kwargs must survive serialization of the fast tokenizer state.'''
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
            UpperCamelCase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            UpperCamelCase : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , A_ )
            self.assertEqual(post_processor_state["add_prefix_space"] , A_ )
            self.assertEqual(post_processor_state["trim_offsets"] , A_ )

    def __UpperCamelCase( self ):
        '''Offset mappings for every add_prefix_space / trim_offsets combination, with and without a leading space.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCamelCase : Union[str, Any] = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                UpperCamelCase : Tuple = F"""{text_of_1_token} {text_of_1_token}"""
                # add_prefix_space=True, trim_offsets=True
                UpperCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
                    A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
                UpperCamelCase : Any = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) , )
                UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
                    A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
                UpperCamelCase : Optional[int] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) , )
                # trim_offsets=False keeps the leading space inside the second token's span
                UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(
                    A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
                UpperCamelCase : Union[str, Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(A_ ), len(A_ ) + 1 + len(A_ )) , )
                UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
                    A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
                UpperCamelCase : Optional[int] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(A_ ), len(A_ ) + 1 + len(A_ )) , )
                # same checks with a leading space in the input text
                UpperCamelCase : List[Any] = F""" {text}"""
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #    encoding.offset_mapping[1],
                #    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(
                    A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
                UpperCamelCase : Dict = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(A_ ) + 1, 1 + len(A_ ) + 1 + len(A_ )) , )
                UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                    A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
                UpperCamelCase : Union[str, Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) , )
                UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                    A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
                UpperCamelCase : Optional[Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) , )
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
    """Fast CPU tests for ``KandinskyVaaPipeline`` built from tiny dummy models.

    NOTE(review): identifiers in this file look machine-mangled — every method
    is named ``__UpperCamelCase`` (later definitions shadow earlier ones, so
    the properties referenced below such as ``time_input_dim`` or
    ``dummy_unet`` do not actually exist), locals are bound to a throwaway
    ``UpperCamelCase`` name and then read under their original names, and one
    signature declares two parameters both named ``A_`` (a SyntaxError).  The
    original identifiers must be restored before this class can run.
    """

    # Pipeline class under test plus the argument lists exercised by the
    # shared PipelineTesterMixin machinery.
    _UpperCAmelCase :str = KandinskyVaaPipeline
    _UpperCAmelCase :str = [
        'image_embeds',
        'negative_image_embeds',
    ]
    _UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds']
    _UpperCAmelCase :list = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _UpperCAmelCase :bool = False

    @property
    def __UpperCamelCase( self ):
        """Hidden size of the dummy text/image embedder."""
        return 32

    @property
    def __UpperCamelCase( self ):
        """Time-embedding input dimension of the dummy UNet."""
        return 32

    @property
    def __UpperCamelCase( self ):
        """Cross-attention dimension — mirrors the time input dimension."""
        return self.time_input_dim

    @property
    def __UpperCamelCase( self ):
        """Derived embedding size: four times the time input dimension."""
        return self.time_input_dim * 4

    @property
    def __UpperCamelCase( self ):
        """Fixed dummy size constant (100)."""
        return 100

    @property
    def __UpperCamelCase( self ):
        """Build a tiny ``UNetaDConditionModel`` with deterministic weights."""
        torch.manual_seed(0 )
        UpperCamelCase : List[str] = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        # NOTE(review): ``A_`` is undefined here — presumably the kwargs dict
        # built above was meant to be unpacked into the constructor.
        UpperCamelCase : Dict = UNetaDConditionModel(**A_ )
        return model

    @property
    def __UpperCamelCase( self ):
        """Constructor kwargs for the tiny VQ ("movq") decoder."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __UpperCamelCase( self ):
        """Create the tiny ``VQModel`` decoder with a fixed seed."""
        torch.manual_seed(0 )
        UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    def __UpperCamelCase( self ):
        """Assemble the pipeline components dict: unet, scheduler and movq."""
        UpperCamelCase : Tuple = self.dummy_unet
        UpperCamelCase : Optional[Any] = self.dummy_movq
        # DDIM scheduler configured like the Kandinsky training setup.
        UpperCamelCase : Dict = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
        UpperCamelCase : Tuple = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __UpperCamelCase( self , A_ , A_=0 ):
        """Build deterministic dummy call inputs for the pipeline.

        NOTE(review): both parameters are named ``A_`` — a SyntaxError in
        Python — and the body reads ``seed``, which no parameter defines;
        originally this was presumably ``(device, seed=0)``.
        """
        UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A_ )
        # MPS does not support device-local generators; seed globally instead.
        if str(A_ ).startswith("mps" ):
            UpperCamelCase : Optional[Any] = torch.manual_seed(A_ )
        else:
            UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
        UpperCamelCase : Optional[int] = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def __UpperCamelCase( self ):
        """Run the pipeline on CPU and compare a corner slice to reference values."""
        UpperCamelCase : Optional[Any] = "cpu"
        UpperCamelCase : List[str] = self.get_dummy_components()
        UpperCamelCase : Tuple = self.pipeline_class(**A_ )
        UpperCamelCase : List[str] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) )
        UpperCamelCase : Optional[int] = output.images
        # Same call with return_dict=False must produce identical pixels.
        UpperCamelCase : int = pipe(
            **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
        UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
        UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        # Reference pixel values recorded from a known-good run.
        UpperCamelCase : int = np.array(
            [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration test of the full Kandinsky 2.2 prior + decoder stack.

    NOTE(review): methods are mangled to ``__UpperCamelCase`` (the second
    definition shadows the first, and neither is discovered by unittest), and
    bodies read names like ``pipe_prior``/``pipeline``/``image_emb`` whose
    assignments were rewritten to throwaway ``UpperCamelCase`` locals; ``A_``
    used as a device argument is undefined.
    """

    def __UpperCamelCase( self ):
        """Intended tearDown: free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase( self ):
        """Generate a 512x512 cat image and compare it to a stored reference."""
        # Reference output committed to the hub.
        UpperCamelCase : Dict = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        # fp16 prior produces the image embeddings for the decoder.
        UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )
        UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
        UpperCamelCase : Tuple = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )
        UpperCamelCase : str = "red cat, 4k photo"
        # Fixed seeds so the generation is reproducible.
        UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 )
        UpperCamelCase , UpperCamelCase : Tuple = pipe_prior(
            A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 )
        UpperCamelCase : Tuple = pipeline(
            image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , )
        UpperCamelCase : Union[str, Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(A_ , A_ )
| 38
| 0
|
from math import sqrt
def A_ ( _lowerCAmelCase ) -> int:
    """Return the sum of the proper divisors of ``_lowerCAmelCase``.

    Divisors are collected in pairs ``(i, n // i)`` for ``i`` up to
    ``sqrt(n)``; the root of a perfect square is counted only once, and the
    number itself is subtracted at the end.

    Fixed: the original tested ``n % i`` where ``n`` was an undefined name
    (the parameter is ``_lowerCAmelCase``), raising ``NameError``.
    """
    total = 0
    # Hoist the (float) square root: it is loop-invariant.
    root = sqrt(_lowerCAmelCase )
    for i in range(1 , int(root + 1 ) ):
        if _lowerCAmelCase % i == 0 and i != root:
            # i and its cofactor are two distinct divisors.
            total += i + _lowerCAmelCase // i
        elif i == root:
            # Exact square root: count it only once.
            total += i
    return total - _lowerCAmelCase
def A_ ( _lowerCAmelCase = 1_0000 ) -> int:
    """Project Euler 21: sum all amicable numbers below ``_lowerCAmelCase``.

    A number ``i`` is amicable when ``d(d(i)) == i`` and ``d(i) != i``, where
    ``d`` is the proper-divisor sum.

    Fixed: the original body called ``sum_of_divisors`` and the script guard
    called ``solution`` — neither name exists in this module — so the divisor
    sum is provided as a local helper and the guard calls ``A_``.
    """

    def _sum_of_divisors(m: int) -> int:
        # Sum of the proper divisors of m, pairing i with m // i up to sqrt(m).
        total = 0
        root = sqrt(m)
        for i in range(1, int(root + 1)):
            if m % i == 0 and i != root:
                total += i + m // i
            elif i == root:
                total += i
        return total - m

    return sum(
        i
        for i in range(1, _lowerCAmelCase)
        if _sum_of_divisors(_sum_of_divisors(i)) == i and _sum_of_divisors(i) != i
    )


if __name__ == "__main__":
    print(A_(int(str(input()).strip())))
| 700
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A_ ( ):
    """Parse the TPU launcher's command line.

    Returns an ``argparse.Namespace`` with ``num_cores`` (int),
    ``training_script`` (str) and ``training_script_args`` (list of the
    remaining arguments, captured via ``REMAINDER``).

    Fixed: the original passed the undefined name ``_lowerCAmelCase`` as the
    ``type``/``nargs`` arguments, bound the parser to a throwaway local while
    calling ``parser.add_argument``, and annotated the return with the
    unimported ``Dict`` (a NameError at import time).
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def A_ ( ) -> None:
    """Launcher entry point: import the training script and spawn it on TPU cores.

    NOTE(review): this body is machine-mangled — it calls ``parse_args`` and
    reads ``args``/``script_fpath``/``mod`` although those values were bound
    to the throwaway local ``UpperCamelCase``, it passes the undefined
    ``_lowerCAmelCase`` to ``import_module``, and the guard below calls an
    undefined ``main``.  The original identifiers must be restored before this
    script can run.  (Return annotation fixed from the unimported
    ``Optional[int]``, which raised NameError at import time.)
    """
    UpperCamelCase : Tuple = parse_args()
    # Import training_script as a module.
    UpperCamelCase : Union[str, Any] = Path(args.training_script )
    # Make the script's directory importable so its module can be loaded.
    sys.path.append(str(script_fpath.parent.resolve() ) )
    UpperCamelCase : List[Any] = script_fpath.stem
    UpperCamelCase : Optional[Any] = importlib.import_module(_lowerCAmelCase )
    # Patch sys.argv
    UpperCamelCase : List[Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    # One process per requested TPU core, each running the module's _mp_fn.
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )


if __name__ == "__main__":
    main()
| 38
| 0
|
from math import factorial
def A_ ( _lowerCAmelCase = 100 ):
    """Project Euler 20: sum of the decimal digits of ``_lowerCAmelCase!``.

    Fixed: the original summed ``int(_lowerCAmelCase)`` once per digit rather
    than converting each digit ``x`` (e.g. for n=100 it returned
    ``100 * len(str(100!))`` instead of 648), and the script guard called the
    undefined name ``solution``.
    """
    return sum(int(x ) for x in str(factorial(_lowerCAmelCase ) ) )


if __name__ == "__main__":
    print(A_(int(input("""Enter the Number: """).strip())))
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import machinery for the vision-encoder-decoder subpackage: the dict
# maps submodule name -> exported names; heavy backends are registered only
# when their framework is installed.
# NOTE(review): each branch below rebinds the same mangled name
# ``__lowerCamelCase`` instead of extending an ``_import_structure`` dict, and
# the final ``_LazyModule`` call references ``_import_structure``, which is
# undefined in this module — the original names must be restored.
__lowerCamelCase : dict = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}

# Register the PyTorch implementation only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : list = ["""VisionEncoderDecoderModel"""]

# Register the TensorFlow implementation only when TF is available.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : list = ["""TFVisionEncoderDecoderModel"""]

# Register the Flax implementation only when Flax is available.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""]

if TYPE_CHECKING:
    # Static imports for type checkers; mirrors the runtime structure above.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import machinery (duplicate copy of the vision-encoder-decoder
# subpackage __init__): maps submodule name -> exported names and registers
# framework backends only when installed.
# NOTE(review): each branch rebinds the same mangled name ``__lowerCamelCase``
# instead of extending an ``_import_structure`` dict, and the final
# ``_LazyModule`` call references ``_import_structure``, which is undefined in
# this module — the original names must be restored.
__lowerCamelCase : dict = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}

# PyTorch backend, only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : list = ["""VisionEncoderDecoderModel"""]

# TensorFlow backend, only when TF is available.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : list = ["""TFVisionEncoderDecoderModel"""]

# Flax backend, only when Flax is available.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""]

if TYPE_CHECKING:
    # Static imports for type checkers; mirrors the runtime structure above.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester( unittest.TestCase ):
    """Configuration holder feeding the ``VivitImageProcessor`` tests below.

    Fixed: the original declared every ``__init__`` parameter as ``A_`` —
    duplicate parameter names are a SyntaxError — while the body read the
    intended names (``size``, ``crop_size``, ...), and stored values in
    throwaway locals instead of instance attributes.  The names are restored
    here from the attributes the test class reads
    (``self.image_processor_tester.batch_size`` etc.), and the class is given
    the name the test class instantiates (``VivitImageProcessingTester``).
    """

    def __init__(
        self ,
        parent ,
        batch_size=7 ,
        num_channels=3 ,
        num_frames=10 ,
        image_size=18 ,
        min_resolution=30 ,
        max_resolution=400 ,
        do_resize=True ,
        size=None ,
        do_normalize=True ,
        image_mean=None ,
        image_std=None ,
        crop_size=None ,
    ):
        """Store the test configuration, applying documented defaults."""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Dict/list defaults are built fresh per instance so instances never
        # share mutable state.
        self.size = size if size is not None else {"shortest_edge": 18}
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a ``VivitImageProcessor``."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
    """Test suite for ``VivitImageProcessor`` over PIL, numpy and torch videos.

    NOTE(review): identifiers are machine-mangled — every method is named
    ``__UpperCamelCase`` (later definitions shadow earlier ones and unittest
    cannot discover any of them as ``test_*``), and bodies read names such as
    ``image_processing``/``video_inputs``/``self.image_processor_tester``
    whose assignments were rewritten to throwaway ``UpperCamelCase`` locals.
    The original names must be restored before these tests can run.
    """

    # Image-processor class under test (None when vision deps are missing).
    _UpperCAmelCase :object = VivitImageProcessor if is_vision_available() else None

    def __UpperCamelCase( self ):
        """Intended setUp: create the shared configuration helper."""
        UpperCamelCase : List[Any] = VivitImageProcessingTester(self )

    @property
    def __UpperCamelCase( self ):
        """Constructor kwargs for the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def __UpperCamelCase( self ):
        """The processor must expose all of its configuration attributes."""
        UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A_ , "image_mean" ) )
        self.assertTrue(hasattr(A_ , "image_std" ) )
        self.assertTrue(hasattr(A_ , "do_normalize" ) )
        self.assertTrue(hasattr(A_ , "do_resize" ) )
        self.assertTrue(hasattr(A_ , "do_center_crop" ) )
        self.assertTrue(hasattr(A_ , "size" ) )

    def __UpperCamelCase( self ):
        """``from_dict`` honours both the stored values and keyword overrides."""
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def __UpperCamelCase( self ):
        """PIL input: output tensor shape must match the configured sizes."""
        UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        UpperCamelCase : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : str = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __UpperCamelCase( self ):
        """numpy input: same shape checks as the PIL variant."""
        UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __UpperCamelCase( self ):
        """torch input: same shape checks as the PIL variant."""
        UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 38
| 0
|
import requests
def A_ ( message_body , slack_url ) -> None:
    """Post ``message_body`` to the Slack incoming-webhook URL ``slack_url``.

    Raises ``ValueError`` when Slack answers with any status other than 200.

    Fixed: the original declared both parameters as ``_lowerCAmelCase`` —
    duplicate parameter names are a SyntaxError — while the body read
    ``message_body``; parameter order follows the example call in the guard
    (body first, URL second).  The guard also called the undefined name
    ``send_slack_message``; it now calls ``A_``.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url , json={"text": message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    A_("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 703
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# Module logger.
__lowerCamelCase : object = logging.get_logger(__name__)

# NOTE(review): every constant below is bound to the same mangled name
# ``__lowerCamelCase`` (each assignment clobbers the previous one); the
# tokenizer class that follows expects names such as VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# which are therefore undefined in this module.

# File names expected inside a saved tokenizer directory.
__lowerCamelCase : dict = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

# Hub URLs of the vocabulary files for the pretrained 90M checkpoint.
__lowerCamelCase : dict = {
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}

# Maximum model input size for the pretrained checkpoint.
__lowerCamelCase : dict = {
    """facebook/blenderbot_small-90M""": 512,
}
class A__ ( __snake_case ):
    """Fast (Rust-backed) tokenizer for BlenderbotSmall.

    NOTE(review): the mangled identifiers leave this class broken — the base
    name ``__snake_case`` and the class-attribute sources
    (``VOCAB_FILES_NAMES`` etc.) are undefined in this module, ``__init__``
    declares several parameters all named ``A_`` (a SyntaxError), and method
    bodies read names (``add_prefix_space``, ``token_ids_a``, ``output``,
    ``sep``, ``cls``) whose bindings were rewritten to throwaway locals.
    """

    # Expected vocab file names, hub URL map, and per-checkpoint max lengths.
    _UpperCAmelCase :dict = VOCAB_FILES_NAMES
    _UpperCAmelCase :dict = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase :dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Matching slow-tokenizer class.
    _UpperCAmelCase :type = BlenderbotSmallTokenizer

    def __init__( self , A_=None , A_=None , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|endoftext|>" , A_=False , A_=True , **A_ , ):
        """Build the backing ByteLevelBPETokenizer and forward special tokens."""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=A_ , merges=A_ , add_prefix_space=A_ , trim_offsets=A_ , ) , bos_token=A_ , eos_token=A_ , unk_token=A_ , **A_ , )
        # Whether a leading space is prepended before tokenizing.
        UpperCamelCase : Union[str, Any] = add_prefix_space

    def __UpperCamelCase( self , A_ , A_=None ):
        """Intended build_inputs_with_special_tokens: wrap ids with BOS/EOS."""
        UpperCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        # Sequence pairs: <bos> A <eos> <eos> B <eos>
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def __UpperCamelCase( self , A_ , A_ = None ):
        """Intended create_token_type_ids_from_sequences: all-zero mask."""
        UpperCamelCase : Tuple = [self.sep_token_id]
        UpperCamelCase : int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 38
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( __snake_case ):
    """Abstract base class for CLI subcommands.

    Subclasses register their argparse subparser through the static hook and
    implement the run method.  NOTE(review): the base name ``__snake_case``
    is undefined in this module (presumably ``ABC``), and both abstract
    methods are mangled to the same name ``__UpperCamelCase``, so the second
    definition shadows the first — confirm against the original file.
    """

    @staticmethod
    @abstractmethod
    def __UpperCamelCase( A_ ):
        """Register this command's arguments on the given parser."""
        raise NotImplementedError()

    @abstractmethod
    def __UpperCamelCase( self ):
        """Execute the command."""
        raise NotImplementedError()
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the ConvBERT subpackage: submodule -> exports.
# NOTE(review): each optional-dependency branch rebinds the same mangled name
# ``__lowerCamelCase`` instead of extending an ``_import_structure`` dict, and
# the final ``_LazyModule`` call references ``_import_structure``, which is
# undefined in this module — restore the original names.
__lowerCamelCase : int = {
    """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
    """tokenization_convbert""": ["""ConvBertTokenizer"""],
}

# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : list = ["""ConvBertTokenizerFast"""]

# PyTorch implementation, only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : int = [
        """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvBertForMaskedLM""",
        """ConvBertForMultipleChoice""",
        """ConvBertForQuestionAnswering""",
        """ConvBertForSequenceClassification""",
        """ConvBertForTokenClassification""",
        """ConvBertLayer""",
        """ConvBertModel""",
        """ConvBertPreTrainedModel""",
        """load_tf_weights_in_convbert""",
    ]

# TensorFlow implementation, only when TF is available.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : str = [
        """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFConvBertForMaskedLM""",
        """TFConvBertForMultipleChoice""",
        """TFConvBertForQuestionAnswering""",
        """TFConvBertForSequenceClassification""",
        """TFConvBertForTokenClassification""",
        """TFConvBertLayer""",
        """TFConvBertModel""",
        """TFConvBertPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers; mirrors the runtime structure above.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    __lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 38
| 0
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 705
|
import logging
import os
import threading
import time
# Optional imports: each platform-specific locking backend (and warnings)
# degrades to None when unavailable so a fallback can be chosen at the bottom
# of the file.
# NOTE(review): the fallback placeholders are bound to the same mangled name
# ``__lowerCamelCase`` rather than to ``warnings``/``msvcrt``/``fcntl``, so
# the feature checks later in the file (`if msvcrt:` etc.) raise NameError
# when an import actually fails — restore the original names.
try:
    import warnings
except ImportError:
    __lowerCamelCase : str = None

try:
    import msvcrt
except ImportError:
    __lowerCamelCase : str = None

try:
    import fcntl
except ImportError:
    __lowerCamelCase : object = None

# Backward compatibility
# ------------------------------------------------
# Python 2 had no TimeoutError; alias it to OSError there.
try:
    TimeoutError
except NameError:
    __lowerCamelCase : type = OSError

# Data
# ------------------------------------------------
# Public API of this vendored copy of the `filelock` package.
__lowerCamelCase : str = [
    """Timeout""",
    """BaseFileLock""",
    """WindowsFileLock""",
    """UnixFileLock""",
    """SoftFileLock""",
    """FileLock""",
]

# Version of the vendored filelock release.
__lowerCamelCase : str = """3.0.12"""
__lowerCamelCase : object = None
# Module-level cache so repeated logger() calls reuse one Logger instance.
_logger = None


def A_ ( ) -> logging.Logger:
    """Return the module logger, creating and caching it on first use.

    Fixed: the original declared ``global _logger`` but ``_logger`` was never
    defined at module level (the ``None`` placeholder above was bound to
    ``__lowerCamelCase``), and the freshly created logger was stored in a
    throwaway local — so the function raised ``NameError`` instead of
    returning a logger.
    """
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class A__ ( __snake_case ):
    """Exception raised when a file lock cannot be acquired within the timeout.

    Fixed: the original ``__init__`` read the undefined name ``lock_file``
    into a throwaway local, so the ``self.lock_file`` attribute that
    ``__str__`` relies on was never set.
    """

    def __init__( self , A_ ):
        # Path of the lock file that could not be acquired (used by __str__).
        self.lock_file = A_
        return None

    def __str__( self ):
        return F"""The file lock '{self.lock_file}' could not be acquired."""
class A__ :
    """Context-manager proxy returned by a file lock's ``acquire``.

    Entering the proxy hands back the wrapped lock; leaving releases it,
    leaving re-entrant acquire/release bookkeeping to the lock itself.

    Fixed: the original ``__init__`` read the undefined name ``lock`` and
    never stored the ``self.lock`` attribute that ``__enter__``/``__exit__``
    rely on, and ``__exit__`` declared three parameters all named ``A_``
    (a SyntaxError).
    """

    def __init__( self , A_ ):
        # Keep a reference so the context manager can release the lock later.
        self.lock = A_
        return None

    def __enter__( self ):
        return self.lock

    def __exit__( self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class A__ :
    """Base class implementing the re-entrant file-lock protocol.

    Subclasses provide the platform-specific OS-level primitives; this class
    layers timeout/polling, a thread-safe nesting counter and the
    context-manager interface on top.

    NOTE(review): identifiers are machine-mangled — several signatures declare
    multiple parameters all named ``A_`` (a SyntaxError), locals are bound to
    the throwaway name ``UpperCamelCase`` and then read under their original
    names (so e.g. ``self._lock_file`` is never actually assigned), every
    method/property is named ``__UpperCamelCase``, and calls reference
    renamed definitions (``logger``, ``Timeout``, ``_Acquire_ReturnProxy``).
    The original names must be restored before this class can work.
    """

    def __init__( self , A_ , A_=-1 , A_=None ):
        """Intended signature: (lock_file, timeout=-1, max_filename_length=None)."""
        UpperCamelCase : List[Any] = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        UpperCamelCase : Dict = self.hash_filename_if_too_long(A_ , A_ )
        # The path to the lock file.
        UpperCamelCase : List[Any] = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        UpperCamelCase : Tuple = None
        # The default timeout value.
        UpperCamelCase : Optional[Any] = timeout
        # We use this lock primarily for the lock counter.
        UpperCamelCase : Union[str, Any] = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        UpperCamelCase : Dict = 0
        return None

    @property
    def __UpperCamelCase( self ):
        """Path of the lock file."""
        return self._lock_file

    @property
    def __UpperCamelCase( self ):
        """Default timeout in seconds (negative means wait forever)."""
        return self._timeout

    # NOTE(review): ``timeout`` is undefined here — the property above was
    # mangled to ``__UpperCamelCase``, so this setter decorator cannot resolve.
    @timeout.setter
    def __UpperCamelCase( self , A_ ):
        """Set the default timeout, coerced to float."""
        UpperCamelCase : Dict = float(A_ )
        return None

    def __UpperCamelCase( self ):
        """Platform hook (_acquire): try to take the OS-level lock, non-blocking."""
        raise NotImplementedError()

    def __UpperCamelCase( self ):
        """Platform hook (_release): drop the OS-level lock."""
        raise NotImplementedError()

    @property
    def __UpperCamelCase( self ):
        """True while this object currently holds the OS-level lock."""
        return self._lock_file_fd is not None

    def __UpperCamelCase( self , A_=None , A_=0.05 ):
        """Intended acquire(timeout=None, poll_intervall=0.05).

        Polls the platform ``_acquire`` until the lock is taken or *timeout*
        elapses; returns a proxy usable in a ``with`` statement.
        """
        if timeout is None:
            UpperCamelCase : Optional[Any] = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        UpperCamelCase : Dict = id(self )
        UpperCamelCase : List[str] = self._lock_file
        UpperCamelCase : int = time.time()
        try:
            while True:
                # Only one thread at a time may attempt the OS-level acquire.
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(A_ )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                UpperCamelCase : List[Any] = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )

    def __UpperCamelCase( self , A_=False ):
        """Intended release(force=False): decrement the nesting counter and
        drop the OS-level lock when it reaches zero (or when forced)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    UpperCamelCase : List[Any] = id(self )
                    UpperCamelCase : Dict = self._lock_file
                    logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
                    self._release()
                    UpperCamelCase : Dict = 0
                    logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
        return None

    def __enter__( self ):
        """Context-manager entry: acquire with the default timeout."""
        self.acquire()
        return self

    def __exit__( self , A_ , A_ , A_ ):
        """Context-manager exit: release one nesting level."""
        self.release()
        return None

    def __del__( self ):
        """Best-effort: force-release the lock on garbage collection."""
        self.release(force=A_ )
        return None

    def __UpperCamelCase( self , A_ , A_ ):
        """Intended hash_filename_if_too_long(path, max_length): replace an
        over-long lock filename with a hash-shortened ``...<hash>.lock``."""
        UpperCamelCase : Tuple = os.path.basename(A_ )
        if len(A_ ) > max_length and max_length > 0:
            UpperCamelCase : Optional[int] = os.path.dirname(A_ )
            UpperCamelCase : int = str(hash(A_ ) )
            # Keep a readable prefix, then "..." + hash + ".lock".
            UpperCamelCase : Any = filename[: max_length - len(A_ ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(A_ , A_ )
        else:
            return path
class A__ ( __snake_case ):
    """File lock backed by ``msvcrt.locking`` (Windows only).

    NOTE(review): inherits the mangled base name ``__snake_case`` (originally
    the base file lock class); ``__init__`` declares duplicate ``A_``
    parameters (a SyntaxError) and the extended-length path it computes is
    stored in a throwaway local instead of ``self._lock_file``.
    """

    def __init__( self , A_ , A_=-1 , A_=None ):
        """Normalise the lock path to the Windows extended-length form."""
        from .file_utils import relative_to_absolute_path
        super().__init__(A_ , timeout=A_ , max_filename_length=A_ )
        # \\?\ prefix lifts the MAX_PATH limit on Windows paths.
        UpperCamelCase : List[Any] = "\\\\?\\" + relative_to_absolute_path(self.lock_file )

    def __UpperCamelCase( self ):
        """Intended _acquire: open the lock file and take a non-blocking
        one-byte region lock; silently give up when either step fails."""
        UpperCamelCase : Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            UpperCamelCase : str = os.open(self._lock_file , A_ )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(A_ )
            else:
                # Holding the descriptor marks the lock as taken.
                UpperCamelCase : Optional[Any] = fd
        return None

    def __UpperCamelCase( self ):
        """Intended _release: unlock the byte region, close the descriptor
        and try to delete the lock file."""
        UpperCamelCase : List[Any] = self._lock_file_fd
        UpperCamelCase : str = None
        msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 )
        os.close(A_ )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class A__ ( __snake_case ):
    """File lock backed by ``fcntl.flock`` (POSIX systems).

    NOTE(review): inherits the mangled base name ``__snake_case``; ``__init__``
    declares duplicate ``A_`` parameters (a SyntaxError) and the filesystem
    name-length limit it computes goes to a throwaway local instead of being
    passed through.
    """

    def __init__( self , A_ , A_=-1 , A_=None ):
        """Clamp the max lock-filename length to the filesystem's f_namemax."""
        UpperCamelCase : Tuple = os.statvfs(os.path.dirname(A_ ) ).f_namemax
        super().__init__(A_ , timeout=A_ , max_filename_length=A_ )

    def __UpperCamelCase( self ):
        """Intended _acquire: open the lock file and take a non-blocking
        exclusive flock; close the descriptor again on failure."""
        UpperCamelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        UpperCamelCase : int = os.open(self._lock_file , A_ )
        try:
            fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(A_ )
        else:
            # Holding the descriptor marks the lock as taken.
            UpperCamelCase : List[str] = fd
        return None

    def __UpperCamelCase( self ):
        """Intended _release: drop the flock and close the descriptor.

        Unlike the Windows backend, the lock file itself is left in place
        (there is no os.remove here).
        """
        UpperCamelCase : str = self._lock_file_fd
        UpperCamelCase : List[Any] = None
        fcntl.flock(A_ , fcntl.LOCK_UN )
        os.close(A_ )
        return None
class A__ ( __snake_case ):
    '''Portable fallback lock: mere existence of the lock file is the lock.'''

    def __UpperCamelCase( self ):
        '''Acquire by exclusively creating the lock file (O_EXCL).'''
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            # Publish the descriptor so release can find it; the original
            # dropped it into a throwaway local, leaving the lock unreleasable.
            self._lock_file_fd = fd
        return None

    def __UpperCamelCase( self ):  # noqa: F811 - mirrors the file's duplicated method naming; likely meant `_acquire`/`_release`
        '''Release by closing the descriptor and removing the lock file.'''
        os.close(self._lock_file_fd )
        # The original assigned None to a throwaway local; the fd attribute
        # must be cleared so the lock reads as released.
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Pick the best lock implementation available on this platform:
# msvcrt -> Windows byte-range locks, fcntl -> POSIX flock, otherwise the
# portable soft (existence-based) lock with a warning.
# NOTE(review): presumably `msvcrt`/`fcntl`/`warnings` are imported earlier
# in this file with try/except fallbacks to None - confirm.
__lowerCamelCase : Dict = None
if msvcrt:
    __lowerCamelCase : Any = WindowsFileLock
elif fcntl:
    __lowerCamelCase : Any = UnixFileLock
else:
    __lowerCamelCase : int = SoftFileLock
    if warnings is not None:
        warnings.warn("""only soft file lock is available""")
| 38
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class A__ ( __snake_case ):
    '''Processor that wraps a SAM image processor and normalizes the optional
    prompt inputs (points, labels, bounding boxes) to the processor's
    longest-edge target size before handing them to the model.

    NOTE(review): throughout this class, results are bound to the throwaway
    local `UpperCamelCase` where upstream assigns real names (including
    `self.*` attributes), and several signatures repeat the parameter name
    `A_`, which is a SyntaxError. Documented as-is; restoring behavior would
    require rewriting from the upstream source.
    '''

    # NOTE(review): both class attributes share one obfuscated name, so the
    # second assignment overwrites the first.
    _UpperCAmelCase :str = ['image_processor']
    _UpperCAmelCase :Tuple = 'SamImageProcessor'
    def __init__( self , A_ ):
        '''Store the wrapped image processor and derive prompt constants.'''
        super().__init__(A_ )
        # NOTE(review): the three values below (image processor, pad value
        # -10, longest-edge target size) are all dropped into one throwaway
        # local; later methods read self.point_pad_value / self.target_size,
        # which are never set here - confirm against upstream.
        UpperCamelCase : List[Any] = self.image_processor
        UpperCamelCase : Tuple = -10
        UpperCamelCase : Optional[int] = self.image_processor.size["longest_edge"]
    def __call__( self , A_=None , A_=None , A_=None , A_=None , A_ = None , **A_ , ):
        '''Run the image processor, then validate and normalize the optional
        point/label/box prompts into the returned encoding.

        NOTE(review): five parameters share the name `A_` (SyntaxError);
        upstream order is images, input_points, input_labels, input_boxes,
        return_tensors.
        '''
        UpperCamelCase : Dict = self.image_processor(
            A_ , return_tensors=A_ , **A_ , )
        # pop arguments that are not used in the foward but used nevertheless
        UpperCamelCase : Union[str, Any] = encoding_image_processor["original_sizes"]
        if hasattr(A_ , "numpy" ): # Checks if Torch or TF tensor
            UpperCamelCase : Tuple = original_sizes.numpy()
        # NOTE(review): upstream unpacks a 3-tuple here
        # (input_points, input_labels, input_boxes); the tuple targets were
        # collapsed into a single throwaway name.
        UpperCamelCase : Dict = self._check_and_preprocess_points(
            input_points=A_ , input_labels=A_ , input_boxes=A_ , )
        UpperCamelCase : List[str] = self._normalize_and_convert(
            A_ , A_ , input_points=A_ , input_labels=A_ , input_boxes=A_ , return_tensors=A_ , )
        return encoding_image_processor
    def __UpperCamelCase( self , A_ , A_ , A_=None , A_=None , A_=None , A_="pt" , ):
        '''Rescale prompts to the target size, pad ragged point sets, convert
        to the requested tensor framework ("pt" or "tf"), and merge them
        into the encoding under input_points/input_labels/input_boxes.
        '''
        if input_points is not None:
            if len(A_ ) != len(A_ ):
                # Single original size shared by every point group.
                UpperCamelCase : Union[str, Any] = [
                    self._normalize_coordinates(self.target_size , A_ , original_sizes[0] ) for point in input_points
                ]
            else:
                UpperCamelCase : Optional[int] = [
                    self._normalize_coordinates(self.target_size , A_ , A_ )
                    for point, original_size in zip(A_ , A_ )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    UpperCamelCase : int = self._pad_points_and_labels(A_ , A_ )
            UpperCamelCase : Optional[Any] = np.array(A_ )
        if input_labels is not None:
            UpperCamelCase : Optional[Any] = np.array(A_ )
        if input_boxes is not None:
            if len(A_ ) != len(A_ ):
                UpperCamelCase : Any = [
                    self._normalize_coordinates(self.target_size , A_ , original_sizes[0] , is_bounding_box=A_ )
                    for box in input_boxes
                ]
            else:
                UpperCamelCase : Dict = [
                    self._normalize_coordinates(self.target_size , A_ , A_ , is_bounding_box=A_ )
                    for box, original_size in zip(A_ , A_ )
                ]
            UpperCamelCase : Dict = np.array(A_ )
        if input_boxes is not None:
            if return_tensors == "pt":
                UpperCamelCase : Optional[int] = torch.from_numpy(A_ )
                # boxes batch size of 1 by default
                UpperCamelCase : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                UpperCamelCase : str = tf.convert_to_tensor(A_ )
                # boxes batch size of 1 by default
                UpperCamelCase : Any = tf.expand_dims(A_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                UpperCamelCase : int = torch.from_numpy(A_ )
                # point batch size of 1 by default
                UpperCamelCase : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                UpperCamelCase : Optional[int] = tf.convert_to_tensor(A_ )
                # point batch size of 1 by default
                UpperCamelCase : Optional[Any] = tf.expand_dims(A_ , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                UpperCamelCase : str = torch.from_numpy(A_ )
                # point batch size of 1 by default
                UpperCamelCase : Optional[int] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                UpperCamelCase : Any = tf.convert_to_tensor(A_ )
                # point batch size of 1 by default
                UpperCamelCase : Dict = tf.expand_dims(A_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )
        return encoding_image_processor
    def __UpperCamelCase( self , A_ , A_ ):
        '''Pad every point group (and its labels) to the largest group size,
        filling with the point-pad value so all arrays share one shape.
        '''
        UpperCamelCase : Dict = max([point.shape[0] for point in input_points] )
        UpperCamelCase : List[Any] = []
        for i, point in enumerate(A_ ):
            if point.shape[0] != expected_nb_points:
                UpperCamelCase : str = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                UpperCamelCase : Dict = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(A_ )
        UpperCamelCase : Union[str, Any] = processed_input_points
        return input_points, input_labels
    def __UpperCamelCase( self , A_ , A_ , A_ , A_=False ):
        '''Scale (x, y) coordinates from the original image size to the
        preprocessed size; boxes are treated as two corner points.
        '''
        UpperCamelCase : Optional[int] = original_size
        UpperCamelCase : str = self.image_processor._get_preprocess_shape(A_ , longest_edge=A_ )
        UpperCamelCase : List[Any] = deepcopy(A_ ).astype(A_ )
        if is_bounding_box:
            # (..., 4) box -> (..., 2, 2) pair of corner points.
            UpperCamelCase : Optional[Any] = coords.reshape(-1 , 2 , 2 )
        UpperCamelCase : Dict = coords[..., 0] * (new_w / old_w)
        UpperCamelCase : int = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            UpperCamelCase : str = coords.reshape(-1 , 4 )
        return coords
    def __UpperCamelCase( self , A_=None , A_=None , A_=None , ):
        '''Validate prompt inputs (converting tensors to nested lists first)
        and coerce each to a list of numpy arrays; returns the triple
        (input_points, input_labels, input_boxes).
        '''
        if input_points is not None:
            if hasattr(A_ , "numpy" ): # Checks for TF or Torch tensor
                UpperCamelCase : List[Any] = input_points.numpy().tolist()
            if not isinstance(A_ , A_ ) or not isinstance(input_points[0] , A_ ):
                raise ValueError("Input points must be a list of list of floating points." )
            UpperCamelCase : Any = [np.array(A_ ) for input_point in input_points]
        else:
            UpperCamelCase : Union[str, Any] = None
        if input_labels is not None:
            if hasattr(A_ , "numpy" ):
                UpperCamelCase : str = input_labels.numpy().tolist()
            if not isinstance(A_ , A_ ) or not isinstance(input_labels[0] , A_ ):
                raise ValueError("Input labels must be a list of list integers." )
            UpperCamelCase : str = [np.array(A_ ) for label in input_labels]
        else:
            UpperCamelCase : Optional[Any] = None
        if input_boxes is not None:
            if hasattr(A_ , "numpy" ):
                UpperCamelCase : Union[str, Any] = input_boxes.numpy().tolist()
            if (
                not isinstance(A_ , A_ )
                or not isinstance(input_boxes[0] , A_ )
                or not isinstance(input_boxes[0][0] , A_ )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            UpperCamelCase : Optional[int] = [np.array(A_ ).astype(np.floataa ) for box in input_boxes]
        else:
            UpperCamelCase : Dict = None
        return input_points, input_labels, input_boxes
    @property
    def __UpperCamelCase( self ):
        '''Deduplicated model input names taken from the image processor.'''
        UpperCamelCase : Optional[int] = self.image_processor.model_input_names
        return list(dict.fromkeys(A_ ) )
    def __UpperCamelCase( self , *A_ , **A_ ):
        '''Delegate mask post-processing to the wrapped image processor.'''
        return self.image_processor.post_process_masks(*A_ , **A_ )
| 706
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A_ ( model_type , generator_name_or_path , question_encoder_name_or_path , dest_dir , config_name_or_path = None , generator_tokenizer_name_or_path = None , question_encoder_tokenizer_name_or_path = None , ) -> None:
    """Consolidate a question-encoder checkpoint and a generator checkpoint
    into a single RAG checkpoint saved under `dest_dir`.

    The original declared all seven parameters as `_lowerCAmelCase`, which is
    a SyntaxError; the restored names match the positional call order used in
    the `__main__` block below.

    Args:
        model_type: "rag_token" or "rag_sequence"; selects the model class
            and the default base config.
        generator_name_or_path: pretrained generator identifier.
        question_encoder_name_or_path: pretrained question-encoder identifier.
        dest_dir: output directory (a `pathlib.Path`).
        config_name_or_path: optional base RAG config identifier.
        generator_tokenizer_name_or_path: optional; defaults to the generator.
        question_encoder_tokenizer_name_or_path: optional; defaults to the
            question encoder.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config )
    rag_model.save_pretrained(dest_dir)
    # Sanity check: the consolidated checkpoint must be loadable.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
    # The original bound the parser, the parsed args and the destination dir
    # to a single throwaway obfuscated name and then read them as `parser`,
    # `args` and `dest_dir` (NameError), and finally called an undefined
    # `consolidate`; restore working names and call the function defined
    # above as `A_`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""",
        choices=["""rag_sequence""", """rag_token"""],
        required=True,
        type=str,
        help="""RAG model type: rag_sequence, rag_token""",
    )
    parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
    parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
    parser.add_argument(
        """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
    )
    parser.add_argument(
        """--generator_tokenizer_name_or_path""",
        type=str,
        help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
    )
    parser.add_argument(
        """--question_encoder_tokenizer_name_or_path""",
        type=str,
        help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
    )
    parser.add_argument(
        """--config_name_or_path""",
        type=str,
        help=(
            """Identifier of the model config to use, if not provided, resolves to a base config for a given"""
            """ ``model_type``"""
        ),
    )
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    A_(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 38
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import bootstrap for the RAG package. The original assigned the
# import structure and the optional-backend lists to a throwaway name and
# then referenced an undefined `_import_structure` (NameError on import);
# restore the canonical _LazyModule pattern.
_import_structure = {
    """configuration_rag""": ["""RagConfig"""],
    """retrieval_rag""": ["""RagRetriever"""],
    """tokenization_rag""": ["""RagTokenizer"""],
}

# PyTorch models are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_rag"""] = [
        """RagModel""",
        """RagPreTrainedModel""",
        """RagSequenceForGeneration""",
        """RagTokenForGeneration""",
    ]

# TensorFlow models are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_rag"""] = [
        """TFRagModel""",
        """TFRagPreTrainedModel""",
        """TFRagSequenceForGeneration""",
        """TFRagTokenForGeneration""",
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 707
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
    '''Model tester for TFConvBert: builds small configs/inputs and checks
    output shapes for each task head.

    NOTE(review): `__init__` declares every keyword parameter as `A_`
    (a SyntaxError) and then ignores them, hard-coding all values into one
    throwaway local, so none of the sizes below are actually stored on the
    instance; all task-check methods also repeat the `A_` parameter name.
    Documented as-is.
    '''
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
        '''Record the parent test case and the (hard-coded) model sizes.'''
        UpperCamelCase : Dict = parent
        UpperCamelCase : str = 13
        UpperCamelCase : int = 7
        UpperCamelCase : str = True
        UpperCamelCase : Dict = True
        UpperCamelCase : str = True
        UpperCamelCase : Tuple = True
        UpperCamelCase : List[str] = 99
        UpperCamelCase : Optional[Any] = 384
        UpperCamelCase : Tuple = 2
        UpperCamelCase : Union[str, Any] = 4
        UpperCamelCase : Dict = 37
        UpperCamelCase : Any = "gelu"
        UpperCamelCase : List[Any] = 0.1
        UpperCamelCase : int = 0.1
        UpperCamelCase : Tuple = 512
        UpperCamelCase : List[Any] = 16
        UpperCamelCase : int = 2
        UpperCamelCase : Dict = 0.02
        UpperCamelCase : Optional[Any] = 3
        UpperCamelCase : List[Any] = 4
        UpperCamelCase : Dict = 128
        UpperCamelCase : Optional[Any] = 2
        UpperCamelCase : Optional[int] = 9
        UpperCamelCase : Optional[int] = 1
        UpperCamelCase : Union[str, Any] = None
    def __UpperCamelCase( self ):
        '''Build random ids/masks/labels plus a ConvBertConfig for the tests.'''
        UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : str = None
        if self.use_input_mask:
            UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : Tuple = None
        if self.use_token_type_ids:
            UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : List[Any] = None
        if self.use_labels:
            UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase : Any = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the base model's last_hidden_state shape.'''
        UpperCamelCase : str = TFConvBertModel(config=A_ )
        UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        UpperCamelCase : Optional[int] = [input_ids, input_mask]
        UpperCamelCase : Any = model(A_ )
        UpperCamelCase : int = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the masked-LM head's logits shape.'''
        UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ )
        UpperCamelCase : int = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Dict = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the sequence-classification head's logits shape.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the multiple-choice head: inputs are tiled per choice.'''
        UpperCamelCase : List[str] = self.num_choices
        UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ )
        UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the token-classification head's logits shape.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : str = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the question-answering head's start/end logits shapes.'''
        UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ )
        UpperCamelCase : Union[str, Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Union[str, Any] = model(A_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __UpperCamelCase( self ):
        '''Convenience wrapper returning (config, inputs_dict) for common tests.'''
        UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
        # NOTE(review): the 7-way tuple unpack below was collapsed into
        # repeated parenthesized throwaway names with a trailing annotation;
        # as written it is not valid Python.
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) : Optional[Any] = config_and_inputs
        UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    '''Common ModelTester/Pipeline test suite for TFConvBert.

    NOTE(review): every test method is named `__UpperCamelCase`, so within
    this class each definition overwrites the previous one; documented as-is.
    '''
    # All TF ConvBert model classes exercised by the common tests.
    _UpperCAmelCase :Dict = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    _UpperCAmelCase :Optional[Any] = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): three flags share one obfuscated attribute name; only
    # the last assignment survives.
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :int = False
    _UpperCAmelCase :str = False
    def __UpperCamelCase( self ):
        '''Create the model tester and a ConfigTester for ConvBertConfig.'''
        UpperCamelCase : Dict = TFConvBertModelTester(self )
        UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 )
    def __UpperCamelCase( self ):
        '''Run the shared config sanity tests.'''
        self.config_tester.run_common_tests()
    def __UpperCamelCase( self ):
        '''Shape-check the base model.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )
    def __UpperCamelCase( self ):
        '''Shape-check the masked-LM head.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A_ )
    def __UpperCamelCase( self ):
        '''Shape-check the multiple-choice head.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A_ )
    def __UpperCamelCase( self ):
        '''Shape-check the question-answering head.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )
    def __UpperCamelCase( self ):
        '''Shape-check the sequence-classification head.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A_ )
    def __UpperCamelCase( self ):
        '''Shape-check the token-classification head.'''
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )
    @slow
    def __UpperCamelCase( self ):
        '''Round-trip each model through SavedModel and verify the reloaded
        model still emits hidden states and attentions of the right shapes.
        '''
        UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Optional[Any] = True
        UpperCamelCase : Any = True
        if hasattr(A_ , "use_cache" ):
            UpperCamelCase : List[str] = True
        UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ )
        for model_class in self.all_model_classes:
            UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ )
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Optional[int] = len(model(A_ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(A_ , saved_model=A_ )
                UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" )
                UpperCamelCase : Dict = tf.keras.models.load_model(A_ )
                UpperCamelCase : str = model(A_ )
                if self.is_encoder_decoder:
                    UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"]
                    UpperCamelCase : Any = outputs["encoder_attentions"]
                else:
                    UpperCamelCase : Any = outputs["hidden_states"]
                    UpperCamelCase : List[str] = outputs["attentions"]
                self.assertEqual(len(A_ ) , A_ )
                UpperCamelCase : int = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(A_ ) , A_ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
                # NOTE(review): heads halved here - presumably ConvBert's
                # head_ratio folds half the heads into convolution; confirm.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def __UpperCamelCase( self ):
        '''Smoke-test loading the pretrained checkpoint from the Hub.'''
        UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(A_ )
    def __UpperCamelCase( self ):
        '''Verify attentions can be enabled per-call and via the config, and
        that attention tensors keep the expected shapes.
        '''
        UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Dict = True
        UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ )
        UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ )
        def check_decoder_attentions_output(A_ ):
            UpperCamelCase : Optional[Any] = len(A_ )
            self.assertEqual(out_len % 2 , 0 )
            UpperCamelCase : Any = outputs.decoder_attentions
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(A_ ):
            UpperCamelCase : Dict = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            UpperCamelCase : Union[str, Any] = True
            UpperCamelCase : List[Any] = False
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            UpperCamelCase : List[str] = len(A_ )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            if self.is_encoder_decoder:
                UpperCamelCase : int = model_class(A_ )
                UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) )
                self.assertEqual(config.output_hidden_states , A_ )
                check_decoder_attentions_output(A_ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            UpperCamelCase : Tuple = True
            UpperCamelCase : int = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            # Check attention is always last and order is fine
            UpperCamelCase : Optional[int] = True
            UpperCamelCase : List[str] = True
            UpperCamelCase : Optional[int] = model_class(A_ )
            UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) )
            self.assertEqual(model.config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
@require_tf
class A__ ( unittest.TestCase ):
    '''Integration test: run the pretrained checkpoint on a fixed input and
    compare a 3x3 corner of the output against golden values.'''
    @slow
    def __UpperCamelCase( self ):
        '''Check output shape and a slice of values for YituTech/conv-bert-base.'''
        UpperCamelCase : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        UpperCamelCase : List[str] = model(A_ )[0]
        UpperCamelCase : int = [1, 6, 768]
        self.assertEqual(output.shape , A_ )
        # Golden values captured from a reference run of the checkpoint.
        UpperCamelCase : List[str] = tf.constant(
            [
                [
                    [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
                    [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
                    [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1e-4 )
| 38
| 0
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__lowerCamelCase : Union[str, Any] = get_logger(__name__)
class A__ :
_UpperCAmelCase :Union[str, Any] = 'dummy_data'
_UpperCAmelCase :Optional[int] = 'datasets'
_UpperCAmelCase :Dict = False
def __init__( self , A_ , A_ , A_ , A_ = None , A_ = False , A_ = True , A_ = None , ):
'''simple docstring'''
UpperCamelCase : List[str] = 0
UpperCamelCase : List[Any] = dataset_name
UpperCamelCase : List[str] = cache_dir
UpperCamelCase : Optional[int] = use_local_dummy_data
UpperCamelCase : Union[str, Any] = config
# download_callbacks take a single url as input
UpperCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCamelCase : Optional[int] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCamelCase : Optional[Any] = str(A_ )
# to be downloaded
UpperCamelCase : List[Any] = None
UpperCamelCase : Tuple = None
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self._dummy_file is None:
UpperCamelCase : int = self.download_dummy_data()
return self._dummy_file
    @property
    def __UpperCamelCase( self ):
        '''Relative folder of the dummy data inside the dataset repo:
        `dummy/<config>/<version>` when a config is set, else `dummy/<version>`.
        '''
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("dummy" , self.version_name )
    @property
    def __UpperCamelCase( self ):
        '''Relative path of the dummy archive: `<dummy_data_folder>/dummy_data.zip`.'''
        return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCamelCase : List[Any] = cached_path(
A_ , cache_dir=self.cache_dir , extract_compressed_file=A_ , force_extract=A_ )
return os.path.join(A_ , self.dummy_file_name )
    @property
    def __UpperCamelCase( self ):
        '''Local path of the dummy zip inside the datasets scripts checkout.

        NOTE(review): reads `self.datasets_scripts_dir`, presumably one of
        the obfuscated class attributes above - confirm.
        '''
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self._bucket_url is None:
UpperCamelCase : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
    @property
    def __UpperCamelCase( self ):
        '''Directory to use as the manual dir: the dummy file itself when it
        is a directory, otherwise its parent (POSIX-style path).'''
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def __UpperCamelCase( self , A_ , *A_ ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCamelCase : Dict = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCamelCase : List[str] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A_ , A_ ):
return self.create_dummy_data_dict(A_ , A_ )
elif isinstance(A_ , (list, tuple) ):
return self.create_dummy_data_list(A_ , A_ )
else:
return self.create_dummy_data_single(A_ , A_ )
def __UpperCamelCase( self , A_ , *A_ ):
'''simple docstring'''
return self.download_and_extract(A_ )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
return self.download_and_extract(A_ )
def __UpperCamelCase( self , A_ , *A_ , **A_ ):
'''simple docstring'''
return path
    def __UpperCamelCase( self ):
        '''Dummy manager records no sizes/checksums; always an empty dict.'''
        return {}
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A_ , A_ ):
for single_url in single_urls:
download_callback(A_ )
else:
UpperCamelCase : int = single_urls
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A_ , A_ ):
UpperCamelCase : Optional[Any] = [os.path.join(A_ , urllib.parse.quote_plus(Path(A_ ).name ) ) for x in single_urls]
else:
UpperCamelCase : List[str] = single_urls
UpperCamelCase : int = os.path.join(A_ , urllib.parse.quote_plus(Path(A_ ).name ) )
UpperCamelCase : Optional[int] = value
# make sure that values are unique
if all(isinstance(A_ , A_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
UpperCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
    def __UpperCamelCase( self , A_ , A_ ):
        """Build the list of local dummy paths for a list/tuple-valued data_url.

        NOTE(review): same automated-rename breakage as the sibling methods —
        duplicate ``A_`` parameters (SyntaxError); assignments bind the
        placeholder ``UpperCamelCase`` while reads use the original names
        (``data_url``, ``is_tf_records``, ``is_pubmed_records``,
        ``dummy_data_list``). Left byte-identical.
        """
        UpperCamelCase : Any = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        UpperCamelCase : str = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , A_ ) ) for url in data_url )
        UpperCamelCase : Union[str, Any] = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            # collapse all shards onto the first URL's dummy file
            UpperCamelCase : List[Any] = [data_url[0]] * len(A_ )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(A_ )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            UpperCamelCase : int = os.path.join(A_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
            dummy_data_list.append(A_ )
        return dummy_data_list
    def __UpperCamelCase( self , A_ , A_ ):
        """Resolve the local dummy path for a single string-valued data_url.

        NOTE(review): duplicate ``A_`` parameters (SyntaxError); reads of
        ``data_url``, ``value`` and ``path_to_dummy_data`` reference the
        original, now-unbound names. Left byte-identical.
        """
        for download_callback in self.download_callbacks:
            download_callback(A_ )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        UpperCamelCase : List[str] = os.path.join(A_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
        if os.path.exists(A_ ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def __UpperCamelCase( self ):
        """No-op placeholder (purpose not evident from this chunk)."""
        pass
    def __UpperCamelCase( self ):
        """No-op placeholder (purpose not evident from this chunk)."""
        pass
    def __UpperCamelCase( self , A_ ):
        """Yield ``(relative posix name, binary file handle)`` pairs for files
        under the given path, reading from the local dummy ZIP when
        ``self.use_local_dummy_data`` is set.

        NOTE(review): broken by automated renaming — the inner helper's
        parameter became ``A_`` but its body still reads ``path``; locals that
        should be ``dummy_parent_path`` / ``relative_path`` / ``members`` /
        ``file_paths`` all bind the placeholder ``UpperCamelCase``, so the
        reads below are unbound. Left byte-identical.
        """
        def _iter_archive_members(A_ ):
            # this preserves the order of the members inside the ZIP archive
            UpperCamelCase : List[str] = Path(self.dummy_file ).parent
            UpperCamelCase : Optional[Any] = path.relative_to(A_ )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                UpperCamelCase : Union[str, Any] = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(A_ )
        UpperCamelCase : Tuple = Path(A_ )
        UpperCamelCase : Any = _iter_archive_members(A_ ) if self.use_local_dummy_data else path.rglob("*" )
        for file_path in file_paths:
            # skip hidden and dunder-prefixed entries
            if file_path.is_file() and not file_path.name.startswith((".", "__") ):
                yield file_path.relative_to(A_ ).as_posix(), file_path.open("rb" )
    def __UpperCamelCase( self , A_ ):
        """Yield file paths under ``A_`` (a path or list of paths), walking
        directories recursively and skipping hidden/dunder names.

        NOTE(review): ``isinstance(A_ , A_ )`` lost its second argument (was a
        type check, presumably ``list``) and the wrapped-list value binds the
        placeholder ``UpperCamelCase`` while the loop reads the original name
        ``paths``. Left byte-identical.
        """
        if not isinstance(A_ , A_ ):
            UpperCamelCase : Optional[int] = [paths]
        for path in paths:
            if os.path.isfile(A_ ):
                if os.path.basename(A_ ).startswith((".", "__") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(A_ ):
                    if os.path.basename(A_ ).startswith((".", "__") ):
                        continue
                    # sort for deterministic traversal order
                    dirnames.sort()
                    for filename in sorted(A_ ):
                        if filename.startswith((".", "__") ):
                            continue
                        yield os.path.join(A_ , A_ )
| 708
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the two module globals below (logger and checkpoint->config-url
# map) were both renamed to the same placeholder ``__lowerCamelCase`` — the
# second assignment clobbers the first, and nothing else can reference either.
__lowerCamelCase : Tuple = logging.get_logger(__name__)
# Pretrained CamemBERT checkpoints mapped to their hosted config.json files.
__lowerCamelCase : str = {
    """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
    """umberto-commoncrawl-cased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
    ),
    """umberto-wikipedia-uncased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
    ),
}
class A__ ( __snake_case ):
    """CamemBERT model configuration (RoBERTa-style hyper-parameters).

    NOTE(review): broken by automated renaming — every ``__init__`` parameter
    is named ``A_`` (duplicate-argument SyntaxError) and each
    ``self.<attr> = <arg>`` was rewritten to bind a local placeholder
    ``UpperCamelCase``, so no attribute is actually stored and the right-hand
    names (``vocab_size`` etc.) are unbound. Left byte-identical.
    """
    _UpperCAmelCase :Union[str, Any] = 'camembert'
    def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ):
        """Store the transformer hyper-parameters (intended behavior)."""
        super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
        UpperCamelCase : List[str] = vocab_size
        UpperCamelCase : Union[str, Any] = hidden_size
        UpperCamelCase : Any = num_hidden_layers
        UpperCamelCase : Union[str, Any] = num_attention_heads
        UpperCamelCase : Dict = hidden_act
        UpperCamelCase : str = intermediate_size
        UpperCamelCase : str = hidden_dropout_prob
        UpperCamelCase : Dict = attention_probs_dropout_prob
        UpperCamelCase : Union[str, Any] = max_position_embeddings
        UpperCamelCase : Optional[Any] = type_vocab_size
        UpperCamelCase : int = initializer_range
        UpperCamelCase : List[str] = layer_norm_eps
        UpperCamelCase : Dict = position_embedding_type
        UpperCamelCase : int = use_cache
        UpperCamelCase : List[str] = classifier_dropout
class A__ ( __snake_case ):
    """ONNX export configuration: declares the model's inputs and their
    dynamic axes."""
    @property
    def __UpperCamelCase( self ):
        """Return an ordered mapping of input name -> {axis index: axis name}.

        The multiple-choice task carries an extra ``choice`` axis between the
        batch and sequence axes; every other task uses (batch, sequence).
        """
        # Fix: the original bound both branch results to a throwaway local and
        # then returned the never-defined name ``dynamic_axis`` (NameError).
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 38
| 0
|
def A_ ( a , b ) -> bool:
    """Return True if string ``a`` can be abbreviated to string ``b``.

    ``a`` is abbreviated by capitalising some of its lowercase letters and
    deleting all remaining lowercase letters (classic "string abbreviation"
    dynamic programme).

    Fix: the original had two parameters both named ``_lowerCAmelCase``
    (a SyntaxError) and bound every dp-table update to a placeholder local,
    leaving ``dp``/``n``/``m``/``a``/``b`` unbound.

    >>> A_("daBcd", "ABC")
    True
    >>> A_("dBcd", "ABC")
    False
    """
    n = len(a )
    m = len(b )
    # dp[i][j]: can the first i chars of `a` be abbreviated to the first j of `b`?
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                # Keep a[i] by capitalising it to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # A lowercase letter may also be deleted outright.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 709
|
def A_ ( input_a , input_b ) -> int:
    """NOR gate: return 1 only when both inputs are 0, else 0.

    Fix: the original declared both parameters as ``_lowerCAmelCase``
    (a SyntaxError) and compared ``input_a == input_a == 0``, never reading
    the second input; the truth-table printer also called the undefined names
    ``nor_gate`` and ``main``.

    >>> A_(0, 0)
    1
    >>> A_(1, 0)
    0
    """
    return int(input_a == input_b == 0 )
def main() -> None:
    """Print the NOR truth table for all four input combinations."""
    print("Truth Table of NOR Gate:" )
    print("| Input 1 | Input 2 | Output |" )
    print(F"""| 0 | 0 | {A_(0 , 0 )} |""" )
    print(F"""| 0 | 1 | {A_(0 , 1 )} |""" )
    print(F"""| 1 | 0 | {A_(1 , 0 )} |""" )
    print(F"""| 1 | 1 | {A_(1 , 1 )} |""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 38
| 0
|
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
# Generic key/value type variables for the skip list below.
# NOTE(review): both TypeVars bind the same placeholder ``__lowerCamelCase``;
# the classes reference ``KT``/``VT``, which are therefore never defined.
__lowerCamelCase : Dict = TypeVar("""KT""")
__lowerCamelCase : Dict = TypeVar("""VT""")
class A__ ( Generic[KT, VT] ):
    """Skip-list node: a key, a value and one forward link per level.

    NOTE(review): broken by automated renaming — ``__init__`` has two
    parameters both named ``A_`` with defaults (SyntaxError) and binds the
    placeholder ``UpperCamelCase`` instead of ``self.key`` / ``self.value`` /
    ``self.forward``, so the attributes read by ``__repr__`` and ``level``
    are never set. Left byte-identical.
    """
    def __init__( self , A_ = "root" , A_ = None ):
        """Create a node (intended: key, value, empty forward list)."""
        UpperCamelCase : int = key
        UpperCamelCase : List[Any] = value
        UpperCamelCase : list[Node[KT, VT]] = []
    def __repr__( self ):
        """Debug representation ``Node(key: value)``."""
        return F"""Node({self.key}: {self.value})"""
    @property
    def __UpperCamelCase( self ):
        """The node's level = number of forward references it holds."""
        return len(self.forward )
class A__ ( Generic[KT, VT] ):
    """Probabilistic skip list with promotion probability ``p`` and a level cap.

    NOTE(review): broken by automated renaming throughout — duplicate ``A_``
    parameters (SyntaxError) and every assignment binds the placeholder
    ``UpperCamelCase`` while later reads use the original names (``node``,
    ``lines``, ``level``, ``new_node``, ...). Code left byte-identical; the
    comments describe the intended behavior.
    """
    def __init__( self , A_ = 0.5 , A_ = 16 ):
        """Create an empty list (intended: head sentinel, level 0, p, max_level)."""
        UpperCamelCase : Node[KT, VT] = Node[KT, VT]()
        UpperCamelCase : List[Any] = 0
        UpperCamelCase : Union[str, Any] = p
        UpperCamelCase : List[str] = max_level
    def __str__( self ):
        """Render an ASCII diagram of the lanes (intended behavior)."""
        UpperCamelCase : int = list(self )
        if len(A_ ) == 0:
            return F"""SkipList(level={self.level})"""
        UpperCamelCase : str = max((len(str(A_ ) ) for item in items) , default=4 )
        UpperCamelCase : Dict = max(A_ , 4 ) + 4
        UpperCamelCase : str = self.head
        UpperCamelCase : List[Any] = []
        UpperCamelCase : int = node.forward.copy()
        lines.append(F"""[{node.key}]""".ljust(A_ , "-" ) + "* " * len(A_ ) )
        lines.append(" " * label_size + "| " * len(A_ ) )
        while len(node.forward ) != 0:
            UpperCamelCase : Union[str, Any] = node.forward[0]
            lines.append(
                F"""[{node.key}]""".ljust(A_ , "-" )
                + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
            lines.append(" " * label_size + "| " * len(A_ ) )
            UpperCamelCase : Tuple = node.forward
        lines.append("None".ljust(A_ ) + "* " * len(A_ ) )
        return F"""SkipList(level={self.level})\n""" + "\n".join(A_ )
    def __iter__( self ):
        """Yield keys in ascending order by walking level 0."""
        UpperCamelCase : Union[str, Any] = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            UpperCamelCase : Union[str, Any] = node.forward[0]
    def __UpperCamelCase( self ):
        """Draw a random level: geometric with parameter p, capped at max_level."""
        UpperCamelCase : Tuple = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def __UpperCamelCase( self , A_ ):
        """Locate a node by key; also collect the per-level predecessors
        (the update vector) needed by insert/delete."""
        UpperCamelCase : List[str] = []
        UpperCamelCase : List[Any] = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            # or equal to searched key would result
            # in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                UpperCamelCase : str = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(A_ )
        update_vector.reverse() # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        # references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        # if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def __UpperCamelCase( self , A_ ):
        """Delete a key if present, re-linking each predecessor level.

        NOTE(review): the ``(node, update_vector)`` tuple returned by
        ``_locate_node`` is bound to a single placeholder here (the second
        copy of this file uses a tuple unpack), so ``node`` is unbound.
        """
        UpperCamelCase : str = self._locate_node(A_ )
        if node is not None:
            for i, update_node in enumerate(A_ ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        UpperCamelCase : Tuple = node.forward[i]
                    else:
                        UpperCamelCase : List[Any] = update_node.forward[:i]
    def __UpperCamelCase( self , A_ , A_ ):
        """Insert or overwrite a key (intended behavior)."""
        UpperCamelCase : Optional[int] = self._locate_node(A_ )
        if node is not None:
            UpperCamelCase : Union[str, Any] = value
        else:
            UpperCamelCase : Dict = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , A_ ):
                    update_vector.append(self.head )
                UpperCamelCase : Optional[int] = level
            UpperCamelCase : Dict = Node(A_ , A_ )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(A_ )
                else:
                    UpperCamelCase : List[Any] = new_node
    def __UpperCamelCase( self , A_ ):
        """Return the value stored under a key, or None when absent."""
        UpperCamelCase : Union[str, Any] = self._locate_node(A_ )
        if node is not None:
            return node.value
        return None
# NOTE(review): every test helper below was renamed to the same placeholder
# ``A_`` (each definition shadows the previous one); their locals bind
# ``UpperCamelCase`` while later reads use the original names (``skip_list``,
# ``node``, ``all_values``), and ``SkipList`` itself is undefined (the class
# above is named ``A__``). The runner functions call ``test_insert`` etc.,
# which no longer exist. Code left byte-identical.
def A_ ( ) -> List[Any]:
    # test_insert: four distinct keys stored and retrievable by walking level 0
    UpperCamelCase : int = SkipList()
    skip_list.insert("Key1" , 3 )
    skip_list.insert("Key2" , 12 )
    skip_list.insert("Key3" , 41 )
    skip_list.insert("Key4" , -19 )
    UpperCamelCase : Optional[int] = skip_list.head
    UpperCamelCase : List[str] = {}
    while node.level != 0:
        UpperCamelCase : str = node.forward[0]
        UpperCamelCase : Optional[int] = node.value
    assert len(_lowerCAmelCase ) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def A_ ( ) -> List[Any]:
    # test_insert_overrides_existing_value
    UpperCamelCase : Optional[int] = SkipList()
    skip_list.insert("Key1" , 10 )
    skip_list.insert("Key1" , 12 )
    skip_list.insert("Key5" , 7 )
    skip_list.insert("Key7" , 10 )
    skip_list.insert("Key10" , 5 )
    skip_list.insert("Key7" , 7 )
    skip_list.insert("Key5" , 5 )
    skip_list.insert("Key10" , 10 )
    UpperCamelCase : Dict = skip_list.head
    UpperCamelCase : Tuple = {}
    while node.level != 0:
        UpperCamelCase : List[str] = node.forward[0]
        UpperCamelCase : Dict = node.value
    if len(_lowerCAmelCase ) != 4:
        print()
    assert len(_lowerCAmelCase ) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def A_ ( ) -> List[Any]:
    # test_searching_empty_list_returns_none
    UpperCamelCase : List[Any] = SkipList()
    assert skip_list.find("Some key" ) is None
def A_ ( ) -> Tuple:
    # test_search
    UpperCamelCase : Optional[int] = SkipList()
    skip_list.insert("Key2" , 20 )
    assert skip_list.find("Key2" ) == 20
    skip_list.insert("Some Key" , 10 )
    skip_list.insert("Key2" , 8 )
    skip_list.insert("V" , 13 )
    assert skip_list.find("Y" ) is None
    assert skip_list.find("Key2" ) == 8
    assert skip_list.find("Some Key" ) == 10
    assert skip_list.find("V" ) == 13
def A_ ( ) -> Dict:
    # test_deleting_item_from_empty_list_do_nothing
    UpperCamelCase : Optional[int] = SkipList()
    skip_list.delete("Some key" )
    assert len(skip_list.head.forward ) == 0
def A_ ( ) -> Dict:
    # test_deleted_items_are_not_founded_by_find_method
    UpperCamelCase : List[Any] = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> List[str]:
    # test_delete_removes_only_given_key
    UpperCamelCase : int = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) == 14
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("X" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key1" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> List[Any]:
    # test_delete_doesnt_leave_dead_nodes
    UpperCamelCase : List[Any] = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 142 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("X" )
    def traverse_keys(_lowerCAmelCase ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(_lowerCAmelCase )
    assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def A_ ( ) -> Union[str, Any]:
    # test_iter_always_yields_sorted_values
    def is_sorted(_lowerCAmelCase ):
        return all(next_item >= item for item, next_item in zip(_lowerCAmelCase , lst[1:] ) )
    UpperCamelCase : int = SkipList()
    for i in range(10 ):
        skip_list.insert(_lowerCAmelCase , _lowerCAmelCase )
    assert is_sorted(list(_lowerCAmelCase ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(_lowerCAmelCase ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(_lowerCAmelCase ) )
def A_ ( ) -> Tuple:
    # pytests: repeat all probabilistic tests
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def A_ ( ) -> List[str]:
    # main: small demo printing a skip list after one delete
    UpperCamelCase : Optional[int] = SkipList()
    skip_list.insert(2 , "2" )
    skip_list.insert(4 , "4" )
    skip_list.insert(6 , "4" )
    skip_list.insert(4 , "5" )
    skip_list.insert(8 , "4" )
    skip_list.insert(9 , "4" )
    skip_list.delete(4 )
    print(_lowerCAmelCase )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 710
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __snake_case ):
    """Combined image-processor + tokenizer processor (BLIP-style).

    NOTE(review): broken by automated renaming — ``__init__`` and ``__call__``
    declare many parameters all named ``A_`` (SyntaxError), and locals that
    should be ``self.current_processor`` / ``text_encoding`` /
    ``encoding_image_processor`` bind the placeholder ``UpperCamelCase``.
    Left byte-identical.
    """
    _UpperCAmelCase :Optional[int] = ['image_processor', 'tokenizer']
    _UpperCAmelCase :Tuple = 'BlipImageProcessor'
    _UpperCAmelCase :Optional[int] = 'AutoTokenizer'
    def __init__( self , A_ , A_ ):
        """Wire up the two sub-processors (intended behavior)."""
        UpperCamelCase : str = False
        super().__init__(A_ , A_ )
        UpperCamelCase : str = self.image_processor
    def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ):
        """Tokenize text and/or preprocess images into one encoding."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            UpperCamelCase : int = self.tokenizer
            UpperCamelCase : Optional[int] = self.tokenizer(
                text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
            return text_encoding
        # add pixel_values
        UpperCamelCase : int = self.image_processor(A_ , return_tensors=A_ )
        if text is not None:
            UpperCamelCase : Dict = self.tokenizer(
                text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
        else:
            UpperCamelCase : Dict = None
        if text_encoding is not None:
            encoding_image_processor.update(A_ )
        return encoding_image_processor
    def __UpperCamelCase( self , *A_ , **A_ ):
        """Delegate batch decoding to the tokenizer."""
        return self.tokenizer.batch_decode(*A_ , **A_ )
    def __UpperCamelCase( self , *A_ , **A_ ):
        """Delegate decoding to the tokenizer."""
        return self.tokenizer.decode(*A_ , **A_ )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def __UpperCamelCase( self ):
        """Union of tokenizer and image-processor input names, de-duplicated."""
        UpperCamelCase : List[str] = self.tokenizer.model_input_names
        UpperCamelCase : int = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 38
| 0
|
"""Project Euler Problem 34: find the sum of all numbers that equal the sum
of the factorials of their digits (excluding the trivial 1 and 2)."""
from math import factorial

# Pre-computed factorial of every decimal digit, keyed by digit character.
# Fix: this table and both functions had been renamed to placeholders
# (``__lowerCamelCase`` / ``A_``), leaving every later reference unbound.
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the decimal digits of ``n``.

    >>> sum_of_digit_factorial(145)
    145
    """
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    """Sum all curious numbers i with sum_of_digit_factorial(i) == i.

    The search is bounded by 7 * 9! + 1: an 8-digit number cannot be reached
    by the digit-factorial sum of 8 digits.
    """
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 711
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
    """Audio feature extractor: log-mel spectrograms padded/masked per batch.

    NOTE(review): broken by automated renaming — ``__init__`` parameters are
    all ``A_`` (duplicate-argument SyntaxError; also note the mutable default
    ``[16, 16]``), and every ``self.<attr>`` assignment binds a local
    placeholder ``UpperCamelCase``, so the attributes read in ``__call__``
    (``self.patch_size``, ``self.freq_len``, ...) are never set; many locals
    (``raw_speech``, ``audio_features``, ``audio_mask``, ...) are likewise
    unbound when read. Left byte-identical.
    """
    _UpperCAmelCase :Tuple = ['audio_values', 'audio_mask']
    def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ):
        """Store spectrogram/patch geometry and build the mel filter bank."""
        super().__init__(
            feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , )
        UpperCamelCase : Optional[int] = spectrogram_length
        UpperCamelCase : Dict = num_channels
        UpperCamelCase : Optional[Any] = patch_size
        UpperCamelCase : str = feature_size // self.patch_size[1]
        UpperCamelCase : List[str] = n_fft
        UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate
        UpperCamelCase : Optional[int] = sampling_rate
        UpperCamelCase : int = padding_value
        UpperCamelCase : str = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T
    def __UpperCamelCase( self , A_ ):
        """Compute a dB-scaled, clipped log-mel spectrogram for one waveform."""
        UpperCamelCase : Union[str, Any] = spectrogram(
            A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        UpperCamelCase : List[Any] = log_spec[:, :-1]
        UpperCamelCase : Optional[int] = log_spec - 20.0
        UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ):
        """Convert raw speech into padded ``audio_values`` (+ optional mask)."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    F""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        UpperCamelCase : Union[str, Any] = is_batched_numpy or (
            isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(A_ , np.ndarray ):
            UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa )
        elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            UpperCamelCase : List[Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        UpperCamelCase : str = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , A_ ):
            UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        UpperCamelCase : List[str] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            UpperCamelCase : str = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa )
        # convert into correct format for padding
        UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        UpperCamelCase : List[str] = padded_audio_features * self.padding_value
        for i in range(len(A_ ) ):
            UpperCamelCase : Union[str, Any] = audio_features[i]
            UpperCamelCase : Optional[int] = feature
        # return as BatchFeature
        if return_attention_mask:
            UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            UpperCamelCase : int = {"audio_values": padded_audio_features}
        UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ )
        return encoded_inputs
| 38
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import boilerplate for the XLNet model family.
# NOTE(review): automated renaming broke this module — what should be the
# single dict ``_import_structure`` (with entries added under each ``else:``)
# has every assignment bound to the same placeholder ``__lowerCamelCase``,
# each reassignment clobbering the previous one; the final ``_LazyModule``
# call then references ``_import_structure``, which is never defined.
# Code left byte-identical.
__lowerCamelCase : Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Any = ["""XLNetTokenizer"""]
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Union[str, Any] = ["""XLNetTokenizerFast"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Optional[int] = [
        """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XLNetForMultipleChoice""",
        """XLNetForQuestionAnswering""",
        """XLNetForQuestionAnsweringSimple""",
        """XLNetForSequenceClassification""",
        """XLNetForTokenClassification""",
        """XLNetLMHeadModel""",
        """XLNetModel""",
        """XLNetPreTrainedModel""",
        """load_tf_weights_in_xlnet""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : int = [
        """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXLNetForMultipleChoice""",
        """TFXLNetForQuestionAnsweringSimple""",
        """TFXLNetForSequenceClassification""",
        """TFXLNetForTokenClassification""",
        """TFXLNetLMHeadModel""",
        """TFXLNetMainLayer""",
        """TFXLNetModel""",
        """TFXLNetPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy.
    import sys
    __lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 712
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
# Generic key/value type variables (second copy of the skip-list sub-file).
# NOTE(review): both TypeVars bind the same placeholder ``__lowerCamelCase``;
# the classes below reference ``KT``/``VT``, which are never defined.
__lowerCamelCase : Dict = TypeVar("""KT""")
__lowerCamelCase : Dict = TypeVar("""VT""")
class A__ ( Generic[KT, VT] ):
    """Skip-list node (duplicate of the earlier copy in this file).

    NOTE(review): same rename breakage — duplicate defaulted ``A_`` parameters
    (SyntaxError) and constructor assignments bound to ``UpperCamelCase``
    instead of ``self.key`` / ``self.value`` / ``self.forward``.
    """
    def __init__( self , A_ = "root" , A_ = None ):
        """Create a node (intended: key, value, empty forward list)."""
        UpperCamelCase : int = key
        UpperCamelCase : List[Any] = value
        UpperCamelCase : list[Node[KT, VT]] = []
    def __repr__( self ):
        """Debug representation ``Node(key: value)``."""
        return F"""Node({self.key}: {self.value})"""
    @property
    def __UpperCamelCase( self ):
        """The node's level = number of forward references it holds."""
        return len(self.forward )
class A__ ( Generic[KT, VT] ):
    """Probabilistic skip list (duplicate of the earlier copy in this file).

    NOTE(review): same rename breakage as the first copy. Additionally, this
    copy's ``_locate_node`` call sites use the form
    ``UpperCamelCase , UpperCamelCase : str = ...`` — an annotated assignment
    with a tuple target, which is itself a SyntaxError. Left byte-identical.
    """
    def __init__( self , A_ = 0.5 , A_ = 16 ):
        """Create an empty list (intended: head sentinel, level 0, p, max_level)."""
        UpperCamelCase : Node[KT, VT] = Node[KT, VT]()
        UpperCamelCase : List[Any] = 0
        UpperCamelCase : Union[str, Any] = p
        UpperCamelCase : List[str] = max_level
    def __str__( self ):
        """Render an ASCII diagram of the lanes (intended behavior)."""
        UpperCamelCase : int = list(self )
        if len(A_ ) == 0:
            return F"""SkipList(level={self.level})"""
        UpperCamelCase : str = max((len(str(A_ ) ) for item in items) , default=4 )
        UpperCamelCase : Dict = max(A_ , 4 ) + 4
        UpperCamelCase : str = self.head
        UpperCamelCase : List[Any] = []
        UpperCamelCase : int = node.forward.copy()
        lines.append(F"""[{node.key}]""".ljust(A_ , "-" ) + "* " * len(A_ ) )
        lines.append(" " * label_size + "| " * len(A_ ) )
        while len(node.forward ) != 0:
            UpperCamelCase : Union[str, Any] = node.forward[0]
            lines.append(
                F"""[{node.key}]""".ljust(A_ , "-" )
                + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
            lines.append(" " * label_size + "| " * len(A_ ) )
            UpperCamelCase : Tuple = node.forward
        lines.append("None".ljust(A_ ) + "* " * len(A_ ) )
        return F"""SkipList(level={self.level})\n""" + "\n".join(A_ )
    def __iter__( self ):
        """Yield keys in ascending order by walking level 0."""
        UpperCamelCase : Union[str, Any] = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            UpperCamelCase : Union[str, Any] = node.forward[0]
    def __UpperCamelCase( self ):
        """Draw a random level: geometric with parameter p, capped at max_level."""
        UpperCamelCase : Tuple = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def __UpperCamelCase( self , A_ ):
        """Locate a node by key; also collect the per-level predecessors
        (the update vector) needed by insert/delete."""
        UpperCamelCase : List[str] = []
        UpperCamelCase : List[Any] = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            # or equal to searched key would result
            # in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                UpperCamelCase : str = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(A_ )
        update_vector.reverse() # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        # references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        # if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def __UpperCamelCase( self , A_ ):
        """Delete a key if present, re-linking each predecessor level."""
        UpperCamelCase , UpperCamelCase : str = self._locate_node(A_ )
        if node is not None:
            for i, update_node in enumerate(A_ ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        UpperCamelCase : Tuple = node.forward[i]
                    else:
                        UpperCamelCase : List[Any] = update_node.forward[:i]
    def __UpperCamelCase( self , A_ , A_ ):
        """Insert or overwrite a key (intended behavior)."""
        UpperCamelCase , UpperCamelCase : Optional[int] = self._locate_node(A_ )
        if node is not None:
            UpperCamelCase : Union[str, Any] = value
        else:
            UpperCamelCase : Dict = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , A_ ):
                    update_vector.append(self.head )
                UpperCamelCase : Optional[int] = level
            UpperCamelCase : Dict = Node(A_ , A_ )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(A_ )
                else:
                    UpperCamelCase : List[Any] = new_node
    def __UpperCamelCase( self , A_ ):
        """Return the value stored under a key, or None when absent."""
        UpperCamelCase , UpperCamelCase : Union[str, Any] = self._locate_node(A_ )
        if node is not None:
            return node.value
        return None
def A_ ( ) -> List[Any]:
UpperCamelCase : int = SkipList()
skip_list.insert("Key1" , 3 )
skip_list.insert("Key2" , 12 )
skip_list.insert("Key3" , 41 )
skip_list.insert("Key4" , -19 )
UpperCamelCase : Optional[int] = skip_list.head
UpperCamelCase : List[str] = {}
while node.level != 0:
UpperCamelCase : str = node.forward[0]
UpperCamelCase : Optional[int] = node.value
assert len(_lowerCAmelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def A_ ( ) -> List[Any]:
UpperCamelCase : Optional[int] = SkipList()
skip_list.insert("Key1" , 10 )
skip_list.insert("Key1" , 12 )
skip_list.insert("Key5" , 7 )
skip_list.insert("Key7" , 10 )
skip_list.insert("Key10" , 5 )
skip_list.insert("Key7" , 7 )
skip_list.insert("Key5" , 5 )
skip_list.insert("Key10" , 10 )
UpperCamelCase : Dict = skip_list.head
UpperCamelCase : Tuple = {}
while node.level != 0:
UpperCamelCase : List[str] = node.forward[0]
UpperCamelCase : Dict = node.value
if len(_lowerCAmelCase ) != 4:
print()
assert len(_lowerCAmelCase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def A_ ( ) -> List[Any]:
UpperCamelCase : List[Any] = SkipList()
assert skip_list.find("Some key" ) is None
def A_ ( ) -> None:
    """find() must see the latest value per key and None for missing keys."""
    skip_list = SkipList()
    skip_list.insert("Key2" , 20 )
    assert skip_list.find("Key2" ) == 20
    skip_list.insert("Some Key" , 10 )
    skip_list.insert("Key2" , 8 )
    skip_list.insert("V" , 13 )
    assert skip_list.find("Y" ) is None
    assert skip_list.find("Key2" ) == 8
    assert skip_list.find("Some Key" ) == 10
    assert skip_list.find("V" ) == 13
def A_ ( ) -> None:
    """Deleting from an empty list is a no-op: head keeps no forward links."""
    skip_list = SkipList()
    skip_list.delete("Some key" )
    assert len(skip_list.head.forward ) == 0
def A_ ( ) -> None:
    """Deleted keys must become invisible to find()."""
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> None:
    """Each delete must remove only its own key, leaving the rest intact."""
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) == 14
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("X" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key1" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> None:
    """Delete must not leave dangling references: a full forward-pointer walk
    after delete("X") reaches exactly the 4 remaining node keys."""
    skip_list = SkipList()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 142 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("X" )

    def traverse_keys(node ):
        # Yield this node's key, then every key reachable at any level.
        # (The obfuscated version recursed on its own argument, which would
        # never terminate.)
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )

    assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def A_ ( ) -> None:
    """Iterating the skip list must always yield keys in sorted order,
    including after deletions and late insertions."""

    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )

    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def A_ ( ) -> None:
    """Run every skip-list unit test 100 times.

    The skip list assigns node levels randomly, so one pass can miss
    probabilistic bugs; repetition makes flaky failures reproducible.
    NOTE(review): the ``test_*`` names below are not defined in this module
    as written (the functions above are all named ``A_``) — confirm the
    intended identifiers before running.
    """
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def A_ ( ) -> None:
    """Ad-hoc demo: exercise insert/override/delete, then print the list."""
    skip_list = SkipList()
    skip_list.insert(2 , "2" )
    skip_list.insert(4 , "4" )
    skip_list.insert(6 , "4" )
    skip_list.insert(4 , "5" )
    skip_list.insert(8 , "4" )
    skip_list.insert(9 , "4" )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
    import doctest

    # Run the embedded doctests, then the interactive demo.
    # NOTE(review): ``main`` is not defined under that name in this module as
    # written — confirm the intended entry point.
    doctest.testmod()
    main()
| 38
| 0
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowerCamelCase : int = logging.get_logger(__name__)
class A__ ( GLPNImageProcessor ):
    """Deprecated alias of GLPNImageProcessor, kept for backward compatibility.

    Construction emits a FutureWarning so callers migrate before removal.
    """

    def __init__( self , *args , **kwargs ):
        # Category must be a Warning subclass; the obfuscated source passed a
        # non-warning object here.  Duplicate ``*A_ , **A_`` varargs were also
        # a SyntaxError and are renamed to args/kwargs.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 713
|
from PIL import Image
def A_ ( _lowerCAmelCase ) -> Image:
    """Binarize a grayscale PIL image in place using its mean intensity as the
    threshold: pixels above the mean become 255, all others become 0.

    Assumes the image is single-channel (mode "L") so each pixel is an int.
    """
    height, width = _lowerCAmelCase.size
    mean = 0
    pixels = _lowerCAmelCase.load()
    # First pass: accumulate the mean intensity.
    for i in range(width ):
        for j in range(height ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: threshold every pixel in place.
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return _lowerCAmelCase
if __name__ == "__main__":
    # Load the source image as 8-bit grayscale ("L"), threshold it, and save.
    # NOTE(review): ``mean_threshold`` and ``image`` are not bound under those
    # names in this module as written — confirm the intended identifiers.
    __lowerCamelCase : Union[str, Any] = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
| 38
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import _LazyModule

# Import structure consumed by _LazyModule: submodule name -> exported symbols.
__lowerCamelCase : str = {"""tokenization_tapex""": ["""TapexTokenizer"""]}

if TYPE_CHECKING:
    # Real import only for static type checkers; avoided at runtime.
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # NOTE(review): upstream installs the lazy module via
    # ``sys.modules[__name__] = _LazyModule(...)`` and passes the structure
    # dict; here the result is only bound to a variable and
    # ``_import_structure`` is unbound — confirm the intended wiring.
    __lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 714
|
from math import loga
def A_ ( _lowerCAmelCase ) -> int:
    """Return the bit index of the lowest set bit of a non-negative integer
    (0 for input 0, since 0 has no set bit).

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is negative.
    """
    # Local import: the module-level ``from math import loga`` alias does not
    # exist in the math module as written.
    from math import log2

    # Type check first; the obfuscated ``isinstance(x, x)`` always raised.
    if not isinstance(_lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if _lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    # x & -x isolates the lowest set bit; log2 gives its position.
    return 0 if _lowerCAmelCase == 0 else int(log2(_lowerCAmelCase & -_lowerCAmelCase ) )
if __name__ == "__main__":
    import doctest

    # Verify any examples embedded in this module's docstrings.
    doctest.testmod()
| 0
|
import random
from .binary_exp_mod import bin_exp_mod
def A_ ( n , prec=1000 ) -> bool:
    """Miller-Rabin probabilistic primality test.

    The obfuscated signature duplicated one parameter name (a SyntaxError);
    the original names ``n`` and ``prec`` — which the body already reads —
    are restored.

    Args:
        n: candidate integer.
        prec: number of random witness rounds; error probability <= 4**-prec.

    Returns:
        True if ``n`` is (almost certainly) prime, False if composite.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd: write n - 1 = d * 2**exp with d odd.
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # floor division keeps d an int (``/=`` produced a float)
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        # Built-in three-argument pow is modular binary exponentiation —
        # the same contract as the imported bin_exp_mod helper.
        b = pow(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    # Read an upper bound from stdin and list every prime up to it (inclusive).
    # NOTE(review): ``n`` and ``is_prime_big`` are not bound under those names
    # in this module as written — confirm the intended identifiers.
    __lowerCamelCase : Union[str, Any] = abs(int(input("""Enter bound : """).strip()))
    print("""Here's the list of primes:""")
    print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 715
|
from __future__ import annotations
__lowerCamelCase : Optional[int] = """Muhammad Umer Farooq"""
__lowerCamelCase : Tuple = """MIT"""
__lowerCamelCase : Optional[int] = """1.0.0"""
__lowerCamelCase : int = """Muhammad Umer Farooq"""
__lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me"""
__lowerCamelCase : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class A__ ( HTMLParser ):
    """HTML parser that collects absolute URLs from every anchor tag.

    Relative hrefs are resolved against the domain passed at construction;
    already-seen hrefs are skipped.  The base class is restored to
    HTMLParser (imported above and otherwise unused), and the callback name
    to ``handle_starttag`` — the HTMLParser hook — so anchors are actually
    visited.
    """

    def __init__( self , A_ ):
        super().__init__()
        # Collected absolute URLs, in first-seen order.
        self.urls : list[str] = []
        self.domain = A_

    def handle_starttag( self , tag , attrs ):
        """Record the href of each ``<a>`` tag (HTMLParser callback)."""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor just "#", keep it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the registered domain (last two host labels) of a URL,
    e.g. "https://sub.example.com/x" -> "example.com".

    The netloc extraction is inlined because the helper the original called
    (``get_sub_domain_name``) is not resolvable in this module as written.
    """
    return ".".join(parse.urlparse(_lowerCAmelCase ).netloc.split("." )[-2:] )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the network location (host[:port]) component of a URL."""
    parsed = parse.urlparse(_lowerCAmelCase )
    return parsed.netloc
def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]:
    """Crawl the URL, follow every anchor found on it, and return a sorted,
    de-duplicated list of e-mail addresses on the page's domain.

    NOTE(review): ``get_domain_name`` and ``Parser`` are not resolvable in
    this module as written (the definitions above are named ``A_``/``A__``)
    — confirm the intended names.  Also, ``requests.get`` raises
    RequestException rather than ValueError, so these handlers likely never
    fire; confirm before relying on them.
    """
    domain = get_domain_name(_lowerCAmelCase )
    # Initialize the parser
    parser = Parser(_lowerCAmelCase )
    try:
        # Open URL
        r = requests.get(_lowerCAmelCase )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    # Crawl github.com and print every address found, one per line.
    # NOTE(review): ``emails_from_url`` and ``emails`` are not bound under
    # those names in this module as written — confirm the intended identifiers.
    __lowerCamelCase : Tuple = emails_from_url("""https://github.com""")
    print(f"""{len(emails)} emails found:""")
    print("""\n""".join(sorted(emails)))
| 38
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase : str = logging.get_logger(__name__)
def A_ ( a , b ):
    """Pairwise squared Euclidean distances between the rows of ``a`` and ``b``.

    Uses the expansion |x - y|^2 = |x|^2 - 2 x.y + |y|^2 so no explicit
    pairwise loop is needed.  The obfuscated signature duplicated a
    parameter name (a SyntaxError) and discarded every intermediate binding.

    Returns:
        (len(a), len(b)) array of squared distances.
    """
    bt = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    bb = np.sum(np.square(bt ) , axis=0 )
    ab = np.matmul(a , bt )
    d = aa[:, None] - 2 * ab + bb[None, :]
    return d
def A_ ( x , clusters ):
    """Map each RGB pixel of ``x`` to the index of its nearest cluster color.

    The obfuscated signature duplicated a parameter name (a SyntaxError);
    the intended ``x``/``clusters`` names are restored.
    NOTE(review): ``squared_euclidean_distance`` is not resolvable in this
    module as written (the helper above is named ``A_``) — confirm.
    """
    flat = x.reshape(-1 , 3 )
    dists = squared_euclidean_distance(flat , clusters )
    return np.argmin(dists , axis=1 )
class A__ ( __snake_case ):
    """ImageGPT-style image processor: optional resize, rescale of pixel
    values to [-1, 1], and optional color quantization of each pixel to the
    index of its nearest cluster color ("color tokens").

    NOTE(review): throughout this class the obfuscated assignments bind to
    ``UpperCamelCase`` while later statements read the originally intended
    names (``size``, ``image``, ``images`` ...), and several signatures
    repeat the parameter name ``A_`` — confirm against the upstream
    implementation before relying on runtime behavior.
    """

    # Model-input keys produced by preprocess().
    _UpperCAmelCase :Optional[int] = ['pixel_values']

    def __init__( self , A_ = None , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = True , **A_ , ):
        """Store the default preprocessing configuration: cluster palette,
        resize target, resampling filter, and the normalize/quantize switches."""
        super().__init__(**A_ )
        UpperCamelCase : List[str] = size if size is not None else {"height": 256, "width": 256}
        UpperCamelCase : Optional[int] = get_size_dict(A_ )
        # Cluster palette as an array, or None when quantization is unused.
        UpperCamelCase : int = np.array(A_ ) if clusters is not None else None
        UpperCamelCase : Optional[Any] = do_resize
        UpperCamelCase : List[str] = size
        UpperCamelCase : int = resample
        UpperCamelCase : Dict = do_normalize
        UpperCamelCase : Any = do_color_quantize

    def __UpperCamelCase( self , A_ , A_ , A_ = PILImageResampling.BILINEAR , A_ = None , **A_ , ):
        """Resize an image to the (height, width) given in the size dict."""
        UpperCamelCase : Dict = get_size_dict(A_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
        return resize(
            A_ , size=(size["height"], size["width"]) , resample=A_ , data_format=A_ , **A_ )

    def __UpperCamelCase( self , A_ , A_ = None , ):
        """Rescale pixel values from [0, 255] to [-1, 1] (x / 127.5 - 1)."""
        UpperCamelCase : Optional[Any] = rescale(image=A_ , scale=1 / 127.5 , data_format=A_ )
        UpperCamelCase : Tuple = image - 1
        return image

    def __UpperCamelCase( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ):
        """Full pipeline: validate inputs, optionally resize and normalize,
        then either color-quantize to flat token ids or emit pixel arrays."""
        # Fall back to the instance defaults for every unset option.
        UpperCamelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
        UpperCamelCase : str = size if size is not None else self.size
        UpperCamelCase : List[str] = get_size_dict(A_ )
        UpperCamelCase : Any = resample if resample is not None else self.resample
        UpperCamelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
        UpperCamelCase : Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        UpperCamelCase : Tuple = clusters if clusters is not None else self.clusters
        UpperCamelCase : Dict = np.array(A_ )
        UpperCamelCase : Optional[Any] = make_list_of_images(A_ )
        if not valid_images(A_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        UpperCamelCase : int = [to_numpy_array(A_ ) for image in images]
        if do_resize:
            UpperCamelCase : str = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
        if do_normalize:
            UpperCamelCase : Any = [self.normalize(image=A_ ) for image in images]
        if do_color_quantize:
            UpperCamelCase : Optional[int] = [to_channel_dimension_format(A_ , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            UpperCamelCase : Optional[Any] = np.array(A_ )
            UpperCamelCase : str = color_quantize(A_ , A_ ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            UpperCamelCase : int = images.shape[0]
            UpperCamelCase : Optional[int] = images.reshape(A_ , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            UpperCamelCase : Any = list(A_ )
        else:
            UpperCamelCase : str = [to_channel_dimension_format(A_ , A_ ) for image in images]
        UpperCamelCase : Dict = {"input_ids": images}
        return BatchFeature(data=A_ , tensor_type=A_ )
| 716
|
from __future__ import annotations
def A_ ( _lowerCAmelCase ) -> list[int]:
    """Return all primes strictly below ``limit`` via an odd-only sieve of
    Eratosthenes.  Assumes limit > 2 (same as the original)."""
    is_prime = [True] * _lowerCAmelCase
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Sieve odd numbers only; even indices are never appended below anyway.
    for i in range(3 , int(_lowerCAmelCase**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < _lowerCAmelCase:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , _lowerCAmelCase , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def A_ ( _lowerCAmelCase = 100_0000 ) -> int:
    """Project Euler 50: the prime below ``ceiling`` that is expressible as
    the sum of the most consecutive primes.

    NOTE(review): ``prime_sieve`` is not resolvable in this module as
    written (the sieve above is named ``A_``) — confirm the intended name.
    """
    primes = prime_sieve(_lowerCAmelCase )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        # Start at i + length: shorter runs can never beat the current best.
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= _lowerCAmelCase:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # Print the Project Euler #50 answer for the default ceiling.
    # NOTE(review): ``solution`` is not bound under that name in this module
    # as written — confirm the intended identifier.
    print(f"""{solution() = }""")
| 38
| 0
|
def A_ ( dist , v ) -> None:
    """Pretty-print a v x v distance matrix; unreachable pairs print as INF.

    The obfuscated signature duplicated one parameter name (a SyntaxError);
    the names ``dist``/``v`` that the body reads are restored.
    """
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float("inf" ):
                print(int(dist[i][j] ) , end="\t" )
            else:
                print("INF" , end="\t" )
        print()
def A_ ( graph , v ) -> tuple:
    """Floyd-Warshall all-pairs shortest paths.

    Args:
        graph: v x v adjacency matrix with float("inf") for missing edges.
        v: number of vertices.

    Returns:
        (dist, v) where dist is the shortest-distance matrix.

    NOTE(review): the printer called below follows the upstream name
    ``_print_dist``; the definition above is named ``A_`` as written —
    confirm.
    """
    dist = [[float("inf" ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float("inf" )
                    and dist[k][j] != float("inf" )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
    # Interactive driver: read vertex/edge counts, then each weighted edge.
    __lowerCamelCase : Optional[Any] = int(input("""Enter number of vertices: """))
    __lowerCamelCase : int = int(input("""Enter number of edges: """))
    # Adjacency matrix initialised to "no edge" (infinity) everywhere.
    # NOTE(review): the later reads of ``v``, ``e``, ``graph`` and ``weight``
    # do not match the obfuscated bindings above — confirm the intended names.
    __lowerCamelCase : Any = [[float("""inf""") for i in range(v)] for j in range(v)]
    for i in range(v):
        # Distance from a vertex to itself is zero.
        __lowerCamelCase : Dict = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("""\nEdge """, i + 1)
        __lowerCamelCase : Union[str, Any] = int(input("""Enter source:"""))
        __lowerCamelCase : Dict = int(input("""Enter destination:"""))
        __lowerCamelCase : str = float(input("""Enter weight:"""))
        __lowerCamelCase : str = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 717
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __snake_case ):
    """Dataset input stream backed by a Python generator function.

    NOTE(review): upstream stores the builder as ``self.builder`` and the
    result as ``dataset``; here the obfuscated assignments bind to bare
    ``UpperCamelCase`` names — confirm against the upstream source.
    """

    def __init__( self , A_ , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , A_ = None , **A_ , ):
        """Forward common reader options to the base class and build the
        underlying ``Generator`` dataset builder."""
        super().__init__(
            features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , )
        UpperCamelCase : Optional[int] = Generator(
            cache_dir=A_ , features=A_ , generator=A_ , gen_kwargs=A_ , **A_ , )

    def __UpperCamelCase( self ):
        """Materialize the dataset: a streaming dataset when ``streaming`` is
        set, otherwise download/prepare and load the in-memory 'train' split."""
        if self.streaming:
            UpperCamelCase : Optional[Any] = self.builder.as_streaming_dataset(split="train" )
        # Build regular (map-style) dataset
        else:
            # Default download/verification configuration (all unset).
            UpperCamelCase : Union[str, Any] = None
            UpperCamelCase : Union[str, Any] = None
            UpperCamelCase : List[Any] = None
            UpperCamelCase : List[str] = None
            self.builder.download_and_prepare(
                download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , )
            UpperCamelCase : int = self.builder.as_dataset(
                split="train" , verification_mode=A_ , in_memory=self.keep_in_memory )
        return dataset
| 38
| 0
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class A__ ( __snake_case ):
    """Abstract base for dataset readers that load from one or more paths.

    NOTE(review): upstream assigns these options onto ``self`` (e.g.
    ``self.path_or_paths = path_or_paths``); the obfuscated bindings here
    read as bare names — confirm against the upstream source.
    """

    def __init__( self , A_ = None , A_ = None , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ):
        """Record reader configuration; the split defaults to "train"."""
        UpperCamelCase : Any = path_or_paths
        UpperCamelCase : Any = split if split or isinstance(A_ , A_ ) else "train"
        UpperCamelCase : Union[str, Any] = features
        UpperCamelCase : Dict = cache_dir
        UpperCamelCase : Dict = keep_in_memory
        UpperCamelCase : Dict = streaming
        UpperCamelCase : str = num_proc
        UpperCamelCase : Union[str, Any] = kwargs

    @abstractmethod
    def __UpperCamelCase( self ):
        """Produce the Dataset/IterableDataset; implemented by subclasses."""
        pass
class A__ ( __snake_case ):
    """Abstract base for dataset input streams (no path argument).

    NOTE(review): upstream assigns these options onto ``self``; the
    obfuscated bindings here read as bare names — confirm.
    """

    def __init__( self , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ):
        """Record stream configuration for use by the concrete reader."""
        UpperCamelCase : Union[str, Any] = features
        UpperCamelCase : Optional[int] = cache_dir
        UpperCamelCase : List[str] = keep_in_memory
        UpperCamelCase : Tuple = streaming
        UpperCamelCase : List[str] = num_proc
        UpperCamelCase : Any = kwargs

    @abstractmethod
    def __UpperCamelCase( self ):
        """Produce the Dataset/IterableDataset; implemented by subclasses."""
        pass
| 718
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A_ ( _lowerCAmelCase ): # picklable for multiprocessing
    """Return the sum of an array-like's elements (pickleable pool worker).

    The obfuscated body read an unbound name ``x``; it now sums its own
    argument.
    """
    return _lowerCAmelCase.sum()
def A_ ( _lowerCAmelCase ): # picklable for multiprocessing
    """Return its argument plus one (pickleable pool worker).

    The obfuscated body read an unbound name ``i``; it now increments its
    own argument.
    """
    return _lowerCAmelCase + 1
@dataclass
class A__ :
    """Simple record used by the asdict tests below.

    The obfuscated version declared both fields under the same name
    ``_UpperCAmelCase`` (the second shadowed the first); the field names
    ``x``/``y`` are restored — the test code constructs it as
    ``A(x=..., y=...)``.
    """

    x: int
    y: str
class A__ ( __snake_case ):
    """Unit tests for datasets' py_utils helpers (map_nested, zip_dict,
    temporary_assignment).

    NOTE(review): the obfuscated assignments below bind to ``UpperCamelCase``
    while the assertions read the originally intended names (``sna``,
    ``expected_map_nested_sna_int``, ``foo`` ...) — confirm against the
    upstream test before relying on runtime behavior.
    """

    def __UpperCamelCase( self ):
        """map_nested must map a function over scalars, lists, dicts and
        nested dicts, with and without multiprocessing, and over numpy."""
        # Inputs: empty containers, scalars, flat and nested structures.
        UpperCamelCase : Optional[int] = {}
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Tuple = [1, 2]
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
        UpperCamelCase : Any = {"a": {"1": 1}, "b": 2}
        UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4}
        # Expected outputs after applying the +1 mapping.
        UpperCamelCase : Dict = {}
        UpperCamelCase : Any = []
        UpperCamelCase : Any = 2
        UpperCamelCase : Any = [2, 3]
        UpperCamelCase : Optional[Any] = {"a": 2, "b": 3}
        UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]}
        UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3}
        UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        # Same assertions with a process pool of 2 workers.
        UpperCamelCase : List[str] = 2
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        # numpy arrays: map_numpy toggles mapping over array contents.
        UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        UpperCamelCase : int = {"a": 2, "b": 0, "c": 2}
        UpperCamelCase : Union[str, Any] = {
            "a": np.eye(2 ).astype(A_ ),
            "b": np.zeros(3 ).astype(A_ ),
            "c": np.ones(2 ).astype(A_ ),
        }
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(A_ ): # can't pickle a local lambda
            map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )

    def __UpperCamelCase( self ):
        """zip_dict must zip values key-wise across several dicts."""
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : List[Any] = {"a": 3, "b": 4}
        UpperCamelCase : Tuple = {"a": 5, "b": 6}
        UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )

    def __UpperCamelCase( self ):
        """temporary_assignment must set an attribute inside the context and
        restore the original value on exit."""
        class A__ :
            # Attribute temporarily overridden below.
            _UpperCAmelCase :str = 'bar'

        UpperCamelCase : List[Any] = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(A_ , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def A_ ( iterable_length , num_proc , expected_num_proc ) -> None:
    """map_nested must run sequentially unless num_proc > 1 AND the iterable
    reaches parallel_min_length; otherwise it must use a pool of the
    expected size.

    The obfuscated signature duplicated all three parameter names (a
    SyntaxError); pytest.parametrize requires the names declared in the
    decorator, so those are restored.
    """
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data = {F"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__ ( __snake_case ):
    """Tests for temp_seed: seeding must make random generation reproducible
    within the context and leave generation outside it unaffected.

    NOTE(review): the obfuscated bindings below (``UpperCamelCase``) clash
    with the assertion reads of ``outa`` — confirm against upstream.
    """

    @require_tf
    def __UpperCamelCase( self ):
        """TensorFlow path: identical seeds give identical dense outputs."""
        import tensorflow as tf
        from tensorflow.keras import layers

        UpperCamelCase : int = layers.Dense(2 )

        def gen_random_output():
            UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) )
            return model(A_ ).numpy()

        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : List[Any] = gen_random_output()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        UpperCamelCase : Optional[int] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    @require_torch
    def __UpperCamelCase( self ):
        """PyTorch path: identical seeds give identical linear-layer outputs."""
        import torch

        def gen_random_output():
            UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 )
            UpperCamelCase : Dict = torch.rand(1 , 3 )
            return model(A_ ).detach().numpy()

        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Optional[int] = gen_random_output()
        UpperCamelCase : List[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    def __UpperCamelCase( self ):
        """NumPy path: identical seeds give identical random arrays."""
        def gen_random_output():
            return np.random.rand(1 , 3 )

        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        UpperCamelCase : Optional[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def A_ ( input_data ) -> None:
    """NestedDataStructure must expose the wrapped value unchanged via .data.

    The parameter name must match the parametrize declaration, and the
    obfuscated binding is restored to ``output_data`` which the assertion
    reads.
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def A_ ( data , expected_output ) -> None:
    """NestedDataStructure.flatten must yield all leaves in traversal order.

    Parameter names restored to match the parametrize declaration; the
    obfuscated binding is restored to ``output`` which the assertion reads.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def A_ ( ) -> None:
    """asdict must recurse through dataclasses nested in dicts/lists and
    raise TypeError for a non-dataclass top-level input.

    NOTE(review): ``A`` is not resolvable in this module as written (the
    dataclass above is named ``A__``); the exception type restored in
    pytest.raises follows upstream (asdict raises TypeError on
    non-dataclass input) — confirm both.
    """
    input_ = A(x=1 , y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input_ ) == expected_output
    input_ = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input_ ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="foo" )] )
def A_ ( _lowerCAmelCase ) -> list:
    """Split a string on whitespace (pickleable helper for the pool tests).

    The obfuscated body read an unbound name ``text``; it now splits its own
    argument.
    """
    return _lowerCAmelCase.split()
def A_ ( _lowerCAmelCase ):
    """Yield (timestamp, content) twice, two seconds apart.

    Used to check that iflatmap_unordered yields items as soon as each is
    produced, not after the generator finishes.  The obfuscated body read an
    unbound name ``content``; it now yields its own argument.
    """
    yield (time.time(), _lowerCAmelCase)
    time.sleep(2 )
    yield (time.time(), _lowerCAmelCase)
def A_ ( ) -> None:
    """iflatmap_unordered must flatten worker results from both Pool
    implementations and yield each item immediately after it is produced.

    NOTE(review): ``_split_text`` and
    ``_aseconds_generator_of_aitems_with_timing`` are not resolvable in this
    module as written (the helpers above are named ``A_``) — confirm.
    """
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
| 38
| 0
|
# Algorithm for the pigeonhole sorting
def A_ ( _lowerCAmelCase ) -> None:
    """Sort a list of integers in place using pigeonhole sort.

    Works well when the value range (max - min) is small relative to the
    list length.  Empty input is a no-op.
    """
    if not _lowerCAmelCase:
        return
    min_val = min(_lowerCAmelCase ) # min() finds the minimum value
    max_val = max(_lowerCAmelCase ) # max() finds the maximum value
    size = max_val - min_val + 1 # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in _lowerCAmelCase:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            _lowerCAmelCase[i] = count + min_val
            i += 1
def A_ ( ) -> None:
    """Demo: sort a sample list in place and print the result.

    NOTE(review): ``pigeonhole_sort`` is not resolvable in this module as
    written (the sorter above is named ``A_``) — confirm the intended name.
    """
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    # join() requires strings, so convert the sorted integers first.
    print("Sorted order is:" , " ".join(str(x ) for x in a ) )
if __name__ == "__main__":
    # Run the demo when executed as a script.
    # NOTE(review): ``main`` is not defined under that name in this module as
    # written — confirm the intended entry point.
    main()
| 719
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
    """Placeholder object standing in for a class that requires the
    ``note_seq`` backend; every way of obtaining an instance fails with an
    informative "install note_seq" error instead of an opaque ImportError."""

    # Backends that must be installed for the real class to be importable.
    _UpperCAmelCase :Tuple = ['note_seq']

    def __init__( self , *A_ , **A_ ):
        """Fail at construction time if note_seq is missing."""
        requires_backends(self , ["note_seq"] )

    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        """Fail for the config-based constructor as well."""
        requires_backends(cls , ["note_seq"] )

    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        """Fail for the pretrained-weights constructor as well."""
        requires_backends(cls , ["note_seq"] )
| 38
| 0
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
    """Duplicate of the placeholder above: stands in for a class requiring
    the ``note_seq`` backend and raises an informative error on any use."""

    # Backends that must be installed for the real class to be importable.
    _UpperCAmelCase :Tuple = ['note_seq']

    def __init__( self , *A_ , **A_ ):
        """Fail at construction time if note_seq is missing."""
        requires_backends(self , ["note_seq"] )

    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        """Fail for the config-based constructor as well."""
        requires_backends(cls , ["note_seq"] )

    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        """Fail for the pretrained-weights constructor as well."""
        requires_backends(cls , ["note_seq"] )
| 720
|
import math
import tensorflow as tf
from packaging import version
def A_ ( _lowerCAmelCase ):
    """Exact GELU: x * 0.5 * (1 + erf(x / sqrt(2))).

    The obfuscated bindings are restored to ``x``/``cdf`` which the return
    expression reads.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def A_ ( _lowerCAmelCase ):
    """Tanh-approximated GELU (GPT-2 variant):
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))."""
    x = tf.convert_to_tensor(_lowerCAmelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044_715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def A_ ( _lowerCAmelCase ):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(_lowerCAmelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def A_ ( _lowerCAmelCase ):
    """Fast tanh-based GELU approximation:
    0.5 * x * (1 + tanh(x * 0.7978845608 * (1 + 0.044715 * x**2))).

    The obfuscated version bound both coefficients to the same name; two
    distinct names are restored so both constants are actually used.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeffa = tf.cast(0.044_715 , x.dtype )
    coeffb = tf.cast(0.7_978_845_608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def A_ ( _lowerCAmelCase ):
    """Sigmoid-based "quick" GELU: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """GELU with outputs clipped to [-10, 10] (quantization-friendly variant).

    NOTE(review): ``_gelu`` is not resolvable in this module as written (the
    helper above is named ``A_``) — confirm the intended name.
    """
    return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 )
def A_ ( x , axis=-1 ):
    """Gated Linear Unit: split ``x`` in two along ``axis`` and gate the
    first half with the sigmoid of the second.

    The obfuscated signature duplicated the parameter name (a SyntaxError)
    and gated with the whole input instead of the second half; both are
    restored to the upstream formulation a * sigmoid(b).
    """
    a, b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
# Pick native Keras GELU on TF >= 2.4, otherwise fall back to the manual implementations.
# Fixes: `approximate=` was being passed the input tensor instead of True, and the
# module-level bindings were mangled while the ACT2FN table below reads `gelu`/`gelu_new`.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        """Keras GELU with the tanh approximation enabled."""
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
# Name-to-activation lookup table. Fix: the dict was bound to a mangled name while
# the getter below reads `ACTaFN`.
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def A_(activation_string):
    """Return the TF activation registered under `activation_string`.

    Raises KeyError with the list of known names when the string is unknown.
    Fix: the body referenced `activation_string` while the parameter was named
    `_lowerCAmelCase`, so every call raised NameError.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys())}")
| 38
| 0
|
import json
import os
import torch
from diffusers import UNetaDModel
# Pre-create the output directories that unet()/value_function() below write
# converted weights and configs into (idempotent thanks to exist_ok=True).
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    """Convert a diffuser temporal UNet checkpoint (horizon 32 or 128) to a diffusers UNet1D.

    Loads the original torch checkpoint, builds a fresh `UNetaDModel` with the matching
    config, copies the weights over by positional key correspondence, and saves the new
    state dict plus its JSON config under hub/hopper-medium-v2/unet/hor{hor}/.

    Fixes vs. the mangled version: every local the body reads was unbound, the renaming
    loop discarded the popped tensors, `load_state_dict`/`json.dump` were called with the
    wrong objects, and the name matches the `unet(32)` call in the main guard.
    """
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    else:
        # Previously an unsupported horizon fell through to a NameError; fail explicitly.
        raise ValueError(f"unsupported horizon {hor}, expected 32 or 128")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 6_5536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # The two architectures enumerate their parameters in the same order, so a
    # positional zip of the key sets gives the old-name -> new-name mapping.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    """Convert the diffuser hopper value-function checkpoint to a diffusers UNet1D.

    Mirrors `unet()` above but for the value-function head; the loaded checkpoint is
    already a raw state dict, so it is used directly instead of calling `.state_dict()`.

    Fixes vs. the mangled version: unbound locals throughout, a renaming loop that
    discarded the popped tensors, and wrong arguments to `load_state_dict`/`json.dump`;
    the name matches the `value_function()` call in the main guard.
    """
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 6_5536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    # NOTE: this checkpoint stores the raw state dict itself, not a module.
    state_dict = model
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
# Script entry point: convert the horizon-32 UNet and the value function.
# (Horizon-128 conversion is available but disabled by default.)
if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Force deterministic kernels/RNG so the pixel-level expected slices below are reproducible.
enable_full_determinism()
class KandinskyVaaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU smoke tests for `KandinskyVaaPipeline` built from tiny dummy models.

    Fixes vs. the mangled version: the base class `__snake_case` was undefined (the
    imported mixin is `PipelineTesterMixin`), every method/property shared the name
    `__UpperCamelCase` (so later defs shadowed earlier ones), `get_dummy_inputs`
    declared the parameter `A_` twice (a SyntaxError), and most locals the bodies
    read were never bound. Names are restored to match the `self.*` references the
    class itself makes.
    """

    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # Base channel count for the dummy UNet; referenced in dummy_unet's config.
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny, deterministically initialized UNet2DConditionModel."""
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        """Config for a tiny MoVQ (VQModel) decoder."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        """Assemble the unet/scheduler/movq triple the pipeline constructor expects."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.0_00_85,
            beta_end=0.0_12,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        return {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Seeded call kwargs for the pipeline; `seed + 1` keeps the two embeddings distinct."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            # MPS does not support device-local generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    def test_kandinsky(self):
        """End-to-end dummy run; checks output shape and a pinned pixel slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        # Same call with return_dict=False must yield the same images as a tuple.
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the published kandinsky-2-2 checkpoints.

    Fixes vs. the mangled version: the cleanup hook was not named `tearDown` (so
    unittest never called it — the `super().tearDown()` call grounds the rename),
    the test method did not start with `test_`, `torch.floataa` is not a real torch
    attribute (float16), and several locals (`pipeline`, the generator, the prior
    embeddings) were read without ever being bound.
    """

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        # Re-seed so the decoder run is independent of how many draws the prior made.
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 38
| 0
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Module logger; the helpers below log through it (the mangled binding left `logger` undefined).
logger = logging.get_logger(__name__)
# model_type -> feature extractor class name. Fix: the OrderedDict was bound to a
# mangled name while the rest of this module reads `FEATURE_EXTRACTOR_MAPPING_NAMES`.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)
# Lazy config-class -> extractor-class mapping. Fix: bound to a mangled name while
# `feature_extractor_class_from_name` and `AutoFeatureExtractor` read `FEATURE_EXTRACTOR_MAPPING`.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor class object from its class name, or return None.

    Search order: the known model modules, then extractors registered at runtime,
    then the main `transformers` namespace (which yields the dummy class — and hence
    a helpful error — when a backend dependency is missing).

    Fixes vs. the mangled version: the parameter was named `lowerCamelCase` while the
    body read `class_name`, and every intermediate (resolved module, candidate class)
    was assigned to a throwaway name and then never used.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            submodule_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{submodule_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature-extractor config dict for a model repo/path, or {} if absent.

    Fixes vs. the mangled version: all nine parameters were declared with the same
    name `lowerCamelCase` (a SyntaxError); they are restored from the body's own
    keyword uses and the original type annotations. The second positional argument
    to `get_file_from_repo` is the standard feature-extractor config filename.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Factory that instantiates the correct feature-extractor class via `from_pretrained`.

    Fixes vs. the mangled version: the class and both methods had obfuscated,
    mutually-shadowing names (the class's own error message grounds the restored
    `AutoFeatureExtractor` / `from_pretrained` names), and nearly every local in
    `from_pretrained` was read without being bound.
    """

    def __init__(self) -> None:
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the right feature extractor for `pretrained_model_name_or_path`.

        Resolution order: explicit `feature_extractor_type` in the extractor config,
        then the model config, then remote code (when trusted), then the
        config-class mapping. Raises ValueError when nothing matches.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            # Consumed by get_class_from_dynamic_module above; must not leak into from_dict.
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class, feature extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 39
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 39
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    """Config holder used by the MobileViT image-processing tests below.

    Fixes vs. the mangled version: every constructor parameter was declared with
    the same name `UpperCamelCase` (a SyntaxError), every value was assigned to a
    throwaway local instead of `self.*` (so `prepare_image_processor_dict` and the
    test class read attributes that never existed), and the class name is restored
    to the one the test class instantiates.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Kwargs dict for constructing a MobileViTImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for MobileViTImageProcessor over PIL, numpy and torch inputs.

    Fixes vs. the mangled version: the base class `UpperCamelCase__` was undefined
    (the imported mixin is `ImageProcessingSavingTestMixin`), the tester was assigned
    to a throwaway local instead of `self.image_processor_tester` inside a method
    that was not named `setUp` (so it never ran), methods shared one obfuscated
    name, and locals were read without being bound.
    """

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        # Overrides passed to from_dict must win over the dict's values.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 39
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Assert a 4-row, 3-column dataset with the expected column names and dtypes.

    Fix: the obfuscated signature declared `lowerCamelCase` twice (a SyntaxError);
    the name is restored to `_check_json_dataset`, which the tests below call.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading with keep_in_memory must grow Arrow memory; without it, must not.

    Fix: duplicate `lowerCamelCase` parameters (SyntaxError) and unbound locals;
    restored the parametrized flag, the jsonl fixture path, and tmp_path.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inference) must be honored by the reader.

    Fix: duplicate `lowerCamelCase` parameters and unbound locals; the
    `Features(...)` construction now consumes the dtype it iterates over.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order from the JSON (3, 1, 2) must be preserved, with or without explicit features.

    Fix: duplicate `lowerCamelCase` parameters and unbound locals restored from the
    body's own references; uses the 3-1-2-ordered jsonl fixture.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ):
    '''A schema whose column order differs from the file must be respected.'''
    __lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    __lowerCAmelCase = features.copy()
    __lowerCAmelCase = (
        Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    __lowerCAmelCase = tmp_path / "cache"
    __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
    assert isinstance(lowerCamelCase , lowerCamelCase )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ):
    '''``split`` passed to the reader must be reflected on the dataset,
    defaulting to "train" when None.
    '''
    __lowerCAmelCase = tmp_path / "cache"
    __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read()
    _check_json_dataset(lowerCamelCase , lowerCamelCase )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ):
    '''The reader must accept both a single path and a list of paths.'''
    if issubclass(lowerCamelCase , lowerCamelCase ):
        __lowerCAmelCase = jsonl_path
    elif issubclass(lowerCamelCase , lowerCamelCase ):
        __lowerCAmelCase = [jsonl_path]
    __lowerCAmelCase = tmp_path / "cache"
    __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
    _check_json_dataset(lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ):
    '''Assert each requested split of a DatasetDict has the expected shape
    and feature dtypes.

    NOTE(review): parameter names were mangled; the body reads
    ``dataset_dict``/``expected_features``, which no longer resolve.
    '''
    assert isinstance(lowerCamelCase , lowerCamelCase )
    for split in splits:
        __lowerCAmelCase = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ):
    '''DatasetDict variant of the keep_in_memory memory-growth check.'''
    __lowerCAmelCase = tmp_path / "cache"
    __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
    _check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ):
    '''DatasetDict variant of the explicit-``features``-schema check.'''
    __lowerCAmelCase = tmp_path / "cache"
    __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    __lowerCAmelCase = features.copy() if features else default_expected_features
    __lowerCAmelCase = (
        Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
    _check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ):
    '''Reading a {split: path} mapping must produce matching named splits.'''
    if split:
        __lowerCAmelCase = {split: jsonl_path}
    else:
        __lowerCAmelCase = "train"
        __lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path}
    __lowerCAmelCase = tmp_path / "cache"
    __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
    _check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    '''Deserialize one whole JSON document from an open file-like object.

    Fix: the dump collapsed this function's name to ``__lowerCAmelCase``,
    breaking the ``load_json`` reference in the writer-test parametrize below.
    '''
    return json.load(buffer)
def load_json_lines(buffer):
    '''Parse a JSON-Lines stream: decode each line as its own JSON document.

    Fixes: (1) restore the ``load_json_lines`` name referenced by the writer
    tests; (2) decode each ``line`` — the old comprehension called
    ``json.loads`` on the whole (undefined) ``buffer`` name for every line.
    '''
    return [json.loads(line) for line in buffer]
class UpperCAmelCase__ :
    '''Round-trip tests for JsonDatasetWriter: lines/orient layouts,
    multiprocess writing, invalid num_proc, and compressed output.

    NOTE(review): method bodies assign to the throwaway name
    ``__lowerCAmelCase`` but later read ``exported_content`` — mangled dump.
    '''
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
        '''Writing with/without JSON-Lines must round-trip 10 records.'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write()
            buffer.seek(0 )
            __lowerCAmelCase = load_json_function(UpperCamelCase )
        assert isinstance(UpperCamelCase , UpperCamelCase )
        assert isinstance(exported_content[0] , UpperCamelCase )
        assert len(UpperCamelCase ) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
        '''Each pandas ``orient`` layout must yield the expected container
        type, key set, and element counts.
        '''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write()
            buffer.seek(0 )
            __lowerCAmelCase = load_json(UpperCamelCase )
        assert isinstance(UpperCamelCase , UpperCamelCase )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase ) == 10
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
        '''Same round-trip as above but written with two worker processes.'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write()
            buffer.seek(0 )
            __lowerCAmelCase = load_json_function(UpperCamelCase )
        assert isinstance(UpperCamelCase , UpperCamelCase )
        assert isinstance(exported_content[0] , UpperCamelCase )
        assert len(UpperCamelCase ) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
        '''``orient`` layouts must also hold when writing with num_proc=2.'''
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write()
            buffer.seek(0 )
            __lowerCAmelCase = load_json(UpperCamelCase )
        assert isinstance(UpperCamelCase , UpperCamelCase )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase ) == 10
    def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
        '''num_proc=0 is invalid and must raise at writer construction.'''
        with pytest.raises(UpperCamelCase ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 )
    @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
        '''Compressed writer output must decompress to the reference file.'''
        __lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
        __lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write()
        with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
            __lowerCAmelCase = f.read()
        with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
            __lowerCAmelCase = f.read()
        assert exported_content == original_content
| 39
| 1
|
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
    '''Tokenizer tests for CPM-Ant (jieba-based Chinese tokenizer).'''
    # Tokenizer class under test; the second flag disables the rust tokenizer
    # branch of the common mixin.
    a : int = CpmAntTokenizer
    a : List[str] = False
    def UpperCAmelCase_ ( self ) -> Tuple:
        '''Write a tiny vocabulary file used by the offline mixin tests.'''
        super().setUp()
        __lowerCAmelCase = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    @tooslow
    def UpperCAmelCase_ ( self ) -> str:
        '''Smoke-test tokenize / encode / decode against the released
        openbmb/cpm-ant-10b vocabulary (network + slow, hence @tooslow).
        '''
        __lowerCAmelCase = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        __lowerCAmelCase = "今天天气真好!"
        __lowerCAmelCase = ["今天", "天气", "真", "好", "!"]
        __lowerCAmelCase = tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase , UpperCamelCase )
        __lowerCAmelCase = "今天天气真好!"
        __lowerCAmelCase = [tokenizer.bos_token] + tokens
        __lowerCAmelCase = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
        __lowerCAmelCase = tokenizer.decode(UpperCamelCase )
        self.assertEqual(UpperCamelCase , UpperCamelCase )
| 39
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy import structure for TrOCR: submodules are resolved on first attribute
# access via _LazyModule (or eagerly under TYPE_CHECKING for static analysis).
# Fixes: the structure dict and the torch-only entry were bound to throwaway
# names instead of `_import_structure`, and the lazy module was assigned to a
# variable instead of replacing this module in sys.modules.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: the modeling module is simply left out of the structure.
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for Pix2Struct. Fixes: the structure dict and the
# vision/torch-only entries were bound to throwaway names instead of
# `_import_structure`; the TYPE_CHECKING branch imported from nonexistent
# `*_pixastruct` modules with `PixaStruct*` names; and the lazy module was
# assigned to a variable instead of replacing this module in sys.modules.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( UpperCamelCase__ ):
    '''Unit tests for CMStochasticIterativeScheduler (consistency models).

    NOTE(review): bodies assign to the throwaway name ``__lowerCAmelCase``
    while later lines read ``config``/``scheduler``/``sample`` etc. — mangled
    dump; the original local names must be restored for this to run.
    '''
    # Scheduler class under test and the forward-pass step count used by the
    # common SchedulerCommonTest machinery.
    a : List[str] = (CMStochasticIterativeScheduler,)
    a : str = 1_0
    def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str:
        '''Return the default scheduler config merged with any overrides.'''
        __lowerCAmelCase = {
            "num_train_timesteps": 201,
            "sigma_min": 0.0_02,
            "sigma_max": 80.0,
        }
        config.update(**UpperCamelCase )
        return config
    def UpperCAmelCase_ ( self ) -> List[Any]:
        '''Two consecutive steps must preserve the sample's shape.'''
        __lowerCAmelCase = 10
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase )
        scheduler.set_timesteps(UpperCamelCase )
        __lowerCAmelCase = scheduler.timesteps[0]
        __lowerCAmelCase = scheduler.timesteps[1]
        __lowerCAmelCase = self.dummy_sample
        __lowerCAmelCase = 0.1 * sample
        __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
        __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )
    def UpperCAmelCase_ ( self ) -> Any:
        '''Config round-trips for several train timestep counts.'''
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        '''Config round-trips for clip_denoised True/False.'''
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> List[str]:
        '''Full denoising loop with default timesteps: check output stats.'''
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = 1
        scheduler.set_timesteps(UpperCamelCase )
        __lowerCAmelCase = scheduler.timesteps
        __lowerCAmelCase = torch.manual_seed(0 )
        __lowerCAmelCase = self.dummy_model()
        __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(UpperCamelCase ):
            # 1. scale model input
            __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
            # 2. predict noise residual
            __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
            # 3. predict previous sample x_t-1
            __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
            __lowerCAmelCase = pred_prev_sample
        __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
        __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
        assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
        assert abs(result_mean.item() - 0.25_10 ) < 1E-3
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        '''Denoising loop with custom (explicit) timesteps: check stats.'''
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = [106, 0]
        scheduler.set_timesteps(timesteps=UpperCamelCase )
        __lowerCAmelCase = scheduler.timesteps
        __lowerCAmelCase = torch.manual_seed(0 )
        __lowerCAmelCase = self.dummy_model()
        __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
            # 2. predict noise residual
            __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
            # 3. predict previous sample x_t-1
            __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
            __lowerCAmelCase = pred_prev_sample
        __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
        __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
        assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
        assert abs(result_mean.item() - 0.45_27 ) < 1E-3
    def UpperCAmelCase_ ( self ) -> Any:
        '''Non-descending custom timesteps must be rejected.'''
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = [39, 30, 12, 15, 0]
        with self.assertRaises(UpperCamelCase , msg="`timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        '''Passing both num_inference_steps and timesteps must be rejected.'''
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = [39, 30, 12, 1, 0]
        __lowerCAmelCase = len(UpperCamelCase )
        with self.assertRaises(UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> Tuple:
        '''Timesteps at/after num_train_timesteps must be rejected.'''
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            UpperCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
            scheduler.set_timesteps(timesteps=UpperCamelCase )
| 39
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase : Any = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
    '''TensorFlow benchmark arguments: maps deprecated negated ``no_*`` CLI
    flags to their positive counterparts and exposes TPU/GPU distribution
    strategy setup as cached properties.

    NOTE(review): ``__init__`` assigns to the throwaway name
    ``__lowerCAmelCase`` while reading ``kwargs``/``positive_arg`` — mangled
    dump; the original local names must be restored for this to run.
    '''
    # Deprecated negated flags that are still accepted for backward compat.
    a : int = [
        """no_inference""",
        """no_cuda""",
        """no_tpu""",
        """no_speed""",
        """no_memory""",
        """no_env_print""",
        """no_multi_process""",
    ]
    def __init__( self , **UpperCamelCase ) -> Union[str, Any]:
        '''Translate deprecated ``no_*`` kwargs to their positive form with a
        warning, pop the TF-specific kwargs, and delegate to the base init.
        '''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                __lowerCAmelCase = deprecated_arg[3:]
                __lowerCAmelCase = not kwargs.pop(UpperCamelCase )
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        __lowerCAmelCase = kwargs.pop("tpu_name" , self.tpu_name )
        __lowerCAmelCase = kwargs.pop("device_idx" , self.device_idx )
        __lowerCAmelCase = kwargs.pop("eager_mode" , self.eager_mode )
        __lowerCAmelCase = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**UpperCamelCase )
    a : str = field(
        default=UpperCamelCase__ , metadata={"""help""": """Name of TPU"""} , )
    a : int = field(
        default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
    a : bool = field(default=UpperCamelCase__ , metadata={"""help""": """Benchmark models in eager model."""} )
    a : bool = field(
        default=UpperCamelCase__ , metadata={
            """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
        } , )
    @cached_property
    def UpperCAmelCase_ ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''Resolve a TPU cluster if one is configured/reachable, else None.'''
        requires_backends(self , ["tf"] )
        __lowerCAmelCase = None
        if self.tpu:
            try:
                if self.tpu_name:
                    __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                __lowerCAmelCase = None
        return tpu
    @cached_property
    def UpperCAmelCase_ ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''Build the tf.distribute strategy: TPU, single GPU, or CPU.'''
        requires_backends(self , ["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            __lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
                __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , "GPU" ) # disable GPU
                __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
        return strategy
    @property
    def UpperCAmelCase_ ( self ) -> bool:
        '''Whether a TPU cluster resolver was found.'''
        requires_backends(self , ["tf"] )
        return self._setup_tpu is not None
    @property
    def UpperCAmelCase_ ( self ) -> "tf.distribute.Strategy":
        '''The lazily built distribution strategy.'''
        requires_backends(self , ["tf"] )
        return self._setup_strategy
    @property
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        '''Physical GPU devices visible to TensorFlow.'''
        requires_backends(self , ["tf"] )
        return tf.config.list_physical_devices("GPU" )
    @property
    def UpperCAmelCase_ ( self ) -> int:
        '''Number of usable GPUs (0 when CUDA is disabled).'''
        requires_backends(self , ["tf"] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def UpperCAmelCase_ ( self ) -> bool:
        '''True when at least one GPU is available.'''
        return self.n_gpu > 0
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup  # fix: the package is ``bs4``, not ``bsa``


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldometers and return {statistic label: value} for COVID-19.

    Fixes: restore the public name used by the ``__main__`` block below (the
    dump had collapsed it), import BeautifulSoup from the real ``bs4``
    package, and bind the intermediate ``keys``/``values`` lists that the
    ``+=`` lines and the final zip actually read.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Headline stats: <h1> labels paired with the big counters.
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    # Secondary panel stats appended below the headline numbers.
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f"{key}\n{value}\n")
| 39
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """A binary-tree node: an int payload plus optional left/right children.

    Fixes: restore the class name ``Node`` (annotations and ``make_tree``
    reference it) and the three distinct fields — the dump had collapsed
    ``data``/``left``/``right`` to a single name ``a``, so only one field
    survived while every traversal reads ``.data``/``.left``/``.right``.
    """

    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    """Build the sample tree 1(2(4, 5), 3) used by ``main``.

    Fixes: the five nodes were created but never linked and the function
    returned an undefined name; restore the parent/child wiring and the
    public name ``make_tree`` that ``main`` calls.
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Return node values in root-left-right order ([] for an empty tree).

    Fix: restore the name ``preorder`` — the body recurses via ``preorder``
    and reads ``root``, neither of which resolved under the mangled names.
    """
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root: Node | None) -> list[int]:
    """Return node values in left-right-root order ([] for an empty tree).

    Fix: restore the name ``postorder`` used by the recursion and ``main``.
    """
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    """Return node values in left-root-right order ([] for an empty tree).

    Fix: restore the name ``inorder`` used by the recursion and ``main``.
    """
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root: Node | None) -> int:
    """Return the tree height in nodes (0 for an empty tree).

    Fix: restore the name ``height`` used by the recursion, ``zigzag`` and
    ``main``.
    """
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> list[int]:
    """Breadth-first traversal: node values level by level, left to right.

    Fix: restore the function name ``level_order`` (called by ``main``) and
    the local names ``output``/``process_queue``/``node`` — the body read
    them but the dump had bound everything to a throwaway name.
    """
    output: list[int] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[int]:
    """Return the values found at ``level`` (root is level 1), left to right.

    Fix: restore the public name (called by ``zigzag`` and ``main``) and the
    mangled parameter names the inner helper reads.
    """
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        # Walk down, emitting only nodes that land exactly on the target level.
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[int]:
    """Return the values found at ``level`` (root is level 1), right to left.

    Fix: restore the public name (called by ``zigzag``) and the mangled
    parameter names the inner helper reads.
    """
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        # Mirror image of the left-to-right walk: visit right subtree first.
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> list[list[int]]:
    """Level-by-level values, alternating left-to-right / right-to-left.

    Fix: restore the function name ``zigzag`` (called by ``main``) and the
    local names ``output``/``flag``/``height_tree`` that the loop reads.
    """
    if root is None:
        return []
    output: list[list[int]] = []
    flag = 0  # 0 -> next level left-to-right, 1 -> right-to-left
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    """Demonstrate every traversal on the sample tree.

    Fix: restore the name ``main`` — the ``__main__`` guard below calls it —
    and bind ``root`` (the dump assigned the tree to a throwaway name).
    """
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 39
|
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly (base case of Strassen recursion).

    Fix: restore the public name (called by ``actual_strassen``) and the
    parameter names ``a``/``b`` that the body already reads.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally shaped matrices.

    Fix: restore the public name and the ``matrix_a``/``matrix_b`` parameter
    names the comprehension already reads.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference of two equally shaped matrices.

    Fix: restore the public name and the ``matrix_a``/``matrix_b`` parameter
    names the comprehension already reads.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square, even-dimensioned matrix into its four quadrants.

    Fix: restore the public name, the parameter ``a`` the body reads, and
    the collapsed locals/range bounds (``matrix_length``/``mid``).
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (row count, column count) of ``matrix``.

    Fix: restore the public name (used throughout ``strassen``) and the
    ``matrix`` parameter name the body reads.
    """
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Print the matrix one row per line.

    Fix: apply ``str`` to each ``line`` — the old code stringified the whole
    matrix once per row, printing the full repr repeatedly.
    """
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive core of Strassen multiplication for power-of-two sizes.

    Fix: restore the function name (it recurses into itself and ``strassen``
    calls it) and the collapsed local names for the quadrants and the seven
    Strassen products.
    """
    # Base case: plain 2x2 multiplication.
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    # The seven Strassen products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    # Recombine the products into the result's four quadrants.
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices via Strassen's algorithm.

    Inputs are zero-padded up to the next power-of-two square so the
    divide-and-conquer recursion splits evenly; padding is stripped from the
    result. Fixes: restore the public name (the demo calls ``strassen``),
    the collapsed local names (``maxim`` was read but never bound), and the
    garbled ``math.loga`` -> ``math.log2``.
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    # NOTE(review): preserved from the original — for two square inputs the
    # pair is returned unchanged instead of multiplied; confirm intent.
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maxim = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maxim))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same
    # and also power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix.
    # Fix: the two matrix literals were bound to throwaway names while the
    # call below referenced an undefined name.
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 39
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
a : Tuple = LDMTextToImagePipeline
a : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
a : Optional[Any] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
a : str = TEXT_TO_IMAGE_BATCH_PARAMS
a : List[Any] = False
def UpperCAmelCase_ ( self ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCAmelCase = CLIPTextModel(UpperCamelCase )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Dict:
if str(UpperCamelCase ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(UpperCamelCase )
else:
__lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = LDMTextToImagePipeline(**UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = self.get_dummy_inputs(UpperCamelCase )
__lowerCAmelCase = pipe(**UpperCamelCase ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__lowerCAmelCase = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=torch.floataa , UpperCamelCase=0 ) -> Union[str, Any]:
__lowerCAmelCase = torch.manual_seed(UpperCamelCase )
__lowerCAmelCase = np.random.RandomState(UpperCamelCase ).standard_normal((1, 4, 32, 32) )
__lowerCAmelCase = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase , dtype=UpperCamelCase )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = self.get_inputs(UpperCamelCase )
__lowerCAmelCase = pipe(**UpperCamelCase ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
__lowerCAmelCase = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] )
__lowerCAmelCase = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=torch.floataa , UpperCamelCase=0 ) -> Tuple:
__lowerCAmelCase = torch.manual_seed(UpperCamelCase )
__lowerCAmelCase = np.random.RandomState(UpperCamelCase ).standard_normal((1, 4, 32, 32) )
__lowerCAmelCase = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase , dtype=UpperCamelCase )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = self.get_inputs(UpperCamelCase )
__lowerCAmelCase = pipe(**UpperCamelCase ).images[0]
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
__lowerCAmelCase = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 39
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase : Optional[Any] = '''scheduler_config.json'''
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = 1
a : Optional[int] = 2
a : int = 3
a : Union[str, Any] = 4
a : int = 5
a : Optional[int] = 6
a : str = 7
a : List[Any] = 8
a : List[str] = 9
a : List[str] = 1_0
a : int = 1_1
a : Any = 1_2
a : Any = 1_3
a : Tuple = 1_4
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : torch.FloatTensor
class UpperCAmelCase__ :
a : Tuple = SCHEDULER_CONFIG_NAME
a : Union[str, Any] = []
a : str = True
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict:
self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
@property
def UpperCAmelCase_ ( self ) -> str:
return self._get_compatibles()
@classmethod
def UpperCAmelCase_ ( cls ) -> Tuple:
__lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase = importlib.import_module(__name__.split("." )[0] )
__lowerCAmelCase = [
getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
]
return compatible_classes
| 39
| 1
|
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase : Dict = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : List[str]=None ):
'''simple docstring'''
require_version(deps[pkg] , lowerCamelCase )
| 39
|
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase : List[Any] = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase = None ) -> Union[str, Any]:
__lowerCAmelCase = (
os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowerCAmelCase = Extractor
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
__lowerCAmelCase = os.path.abspath(UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool:
return force_extract or (
not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase ))
)
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str:
__lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase )
if not extractor_format:
return input_path
__lowerCAmelCase = self._get_output_path(UpperCamelCase )
if self._do_extract(UpperCamelCase , UpperCamelCase ):
self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return output_path
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
@abstractmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
...
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
...
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
a : List[bytes] = []
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
with open(UpperCamelCase , "rb" ) as f:
return f.read(UpperCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if not magic_number:
__lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
__lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
return tarfile.is_tarfile(UpperCamelCase )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
def resolved(UpperCamelCase ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase ) )
def badpath(UpperCamelCase , UpperCamelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase )
def badlink(UpperCamelCase , UpperCamelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase )
__lowerCAmelCase = resolved(UpperCamelCase )
for finfo in members:
if badpath(finfo.name , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = tarfile.open(UpperCamelCase )
tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) )
tar_file.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x1F\x8B"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with gzip.open(UpperCamelCase , "rb" ) as gzip_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : List[Any] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase , "rb" ) as fp:
__lowerCAmelCase = _EndRecData(UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be
if len(UpperCamelCase ) == sizeCentralDir:
__lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file:
zip_file.extractall(UpperCamelCase )
zip_file.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with lzma.open(UpperCamelCase ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = rarfile.RarFile(UpperCamelCase )
rf.extractall(UpperCamelCase )
rf.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : int = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
__lowerCAmelCase = zstd.ZstdDecompressor()
with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh:
dctx.copy_stream(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with bza.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with pyazr.SevenZipFile(UpperCamelCase , "r" ) as archive:
archive.extractall(UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
a : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCAmelCase_ ( cls ) -> Optional[Any]:
return max(
len(UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase , UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase )
except OSError:
return b""
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/>
__lowerCAmelCase = cls._get_magic_number_max_length()
__lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return extractor_format
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase )
# Prevent parallel extractions
__lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) )
with FileLock(UpperCamelCase ):
shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format
else:
__lowerCAmelCase = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase , UpperCamelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase ):
return extractor.extract(UpperCamelCase , UpperCamelCase )
| 39
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
@property
def UpperCAmelCase_ ( self ) -> str:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = ort.SessionOptions()
__lowerCAmelCase = False
return options
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
__lowerCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = "A red cat sitting on a park bench"
__lowerCAmelCase = np.random.RandomState(0 )
__lowerCAmelCase = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , mask_image=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase , output_type="np" , )
__lowerCAmelCase = output.images
__lowerCAmelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__lowerCAmelCase = np.array([0.25_14, 0.30_07, 0.35_17, 0.17_90, 0.23_82, 0.31_67, 0.19_44, 0.22_73, 0.24_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
__lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
__lowerCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = "A red cat sitting on a park bench"
__lowerCAmelCase = np.random.RandomState(0 )
__lowerCAmelCase = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , mask_image=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase , output_type="np" , )
__lowerCAmelCase = output.images
__lowerCAmelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__lowerCAmelCase = np.array([0.00_86, 0.00_77, 0.00_83, 0.00_93, 0.01_07, 0.01_39, 0.00_94, 0.00_97, 0.01_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 39
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self ) -> List[str]:
# test for the above condition
self.test()
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = 0
__lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
__lowerCAmelCase = self.advance()
if not self.does_advance(UpperCamelCase ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase )
counter += 1
if counter > 1_0000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def UpperCAmelCase_ ( self ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> Dict:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
__lowerCAmelCase = token_ids
__lowerCAmelCase = len(self.token_ids )
__lowerCAmelCase = -1 # the index of the currently fulfilled step
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.fulfilled_idx += 1
__lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
__lowerCAmelCase = True
__lowerCAmelCase = completed
else:
# failed to make progress.
__lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = False
__lowerCAmelCase = 0
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Optional[Any]:
__lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.fulfilled_idx
__lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase=True ) -> Optional[int]:
__lowerCAmelCase = max([len(UpperCamelCase ) for one in nested_token_ids] )
__lowerCAmelCase = {}
for token_ids in nested_token_ids:
__lowerCAmelCase = root
for tidx, token_id in enumerate(UpperCamelCase ):
if token_id not in level:
__lowerCAmelCase = {}
__lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
F''' {nested_token_ids}.''' )
__lowerCAmelCase = root
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
__lowerCAmelCase = self.trie
for current_token in current_seq:
__lowerCAmelCase = start[current_token]
__lowerCAmelCase = list(start.keys() )
return next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
__lowerCAmelCase = self.next_tokens(UpperCamelCase )
return len(UpperCamelCase ) == 0
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = list(root.values() )
if len(UpperCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = self.count_leaves(UpperCamelCase )
return len(UpperCamelCase ) != leaf_count
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> List[Any]:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCamelCase , UpperCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
__lowerCAmelCase = DisjunctiveTrie(UpperCamelCase )
__lowerCAmelCase = nested_token_ids
__lowerCAmelCase = self.trie.max_height
__lowerCAmelCase = []
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.current_seq.append(UpperCamelCase )
__lowerCAmelCase = True
else:
__lowerCAmelCase = True
self.reset()
__lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
__lowerCAmelCase = completed
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = False
__lowerCAmelCase = []
def UpperCAmelCase_ ( self ) -> int:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Union[str, Any]:
__lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.current_seq
__lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase__:
    """Tracks, for one beam, progress through a list of generation `Constraint` objects.

    State lives in three buckets: `complete_constraints` (fulfilled),
    `inprogress_constraint` (at most one partially fulfilled), and
    `pending_constraints` (not yet started).

    Fixed: every method previously shared the single name `UpperCAmelCase_`, so
    internal calls such as `self.init_state()` and `self.add()` could never
    resolve; several locals (`complete`, `stepped`, `advance`, `log_level`-style
    throwaway assignments) were written to a dummy name while later lines read
    the real one; `copy()` referenced the undefined name `ConstraintListState`.
    """

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        """Reset to the initial state: nothing complete, nothing in progress."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        # stateless copies so the caller's constraint objects are never mutated
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score how far along this state is (used to bucket beams by progress)."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Return all token ids that would advance some constraint, or None if none."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        """Rebuild the state from scratch by replaying `token_ids` (may be None)."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        """Feed one generated token into the constraint machinery.

        Returns:
            (complete, stepped): whether a constraint was completed and whether
            any incremental progress was made.
        """
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true.")
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        # `type(self)` replaces a dangling reference to the undefined name
        # `ConstraintListState` (the class's pre-obfuscation name).
        new_state = type(self)(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 39
| 1
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : List[Any] = logging.getLogger()
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("-f" )
__lowerCAmelCase = parser.parse_args()
return args.f
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """End-to-end tests for the DeeBERT research-project example script.

    Fixed: all methods shared one name so `self.run_and_check` (called below)
    never resolved, and the bodies referenced undefined names (`stream_handler`,
    `args`, `sys`, `value`).
    """

    def setup(self) -> None:
        # Mirror the example script's logging to stdout so it shows in test output.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run `run_glue_deebert.main()` with `args` as argv and check all reported metrics."""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.6_66)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n            --model_type roberta\n            --model_name_or_path roberta-base\n            --task_name MRPC\n            --do_train\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --max_seq_length 128\n            --per_gpu_eval_batch_size=1\n            --per_gpu_train_batch_size=8\n            --learning_rate 2e-4\n            --num_train_epochs 3\n            --overwrite_output_dir\n            --seed 42\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --save_steps 0\n            --overwrite_cache\n            --eval_after_first_stage\n            ".split()
        self.run_and_check(train_args)
        eval_args = "\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --eval_each_highway\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            ".split()
        self.run_and_check(eval_args)
        entropy_eval_args = "\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --early_exit_entropy 0.1\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            ".split()
        self.run_and_check(entropy_eval_args)
| 39
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
    """Fast (dummy-model) tests for the Kandinsky image-to-image pipeline.

    Fixed: every property/method shared the single name `UpperCAmelCase_`, so
    references such as `self.dummy_unet`, `self.get_dummy_components` and
    `seed`/`device` could never resolve; property and attribute names below are
    restored from how they are referenced inside this class.
    NOTE(review): the class-attribute names (`params`, `batch_params`,
    `required_optional_params`, `test_xformers_attention`) follow the
    PipelineTesterMixin contract — confirm against the mixin.
    """

    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        # NOTE(review): not referenced elsewhere in this class; name assumed.
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # `np.uint8` restores the garbled `np.uinta` (NumPy has no such attribute).
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration test: Kandinsky img2img against a reference image.

    Fixed: the teardown method's name was obfuscated even though it calls
    `super().tearDown()`, and the body referenced undefined obfuscated names;
    `torch.floataa` restored to `torch.float16`.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 39
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase : Tuple = logging.get_logger(__name__)
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """Deprecated alias of `BeitImageProcessor`; warns on construction.

    Fixed: the original signature declared two parameters with the same name
    (a SyntaxError) and passed an argument object where the warning category
    belongs; `FutureWarning` matches the deprecation message.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 39
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments for which model/config/tokenizer to fine-tune from.

    Fixed: all fields were named `a` (each shadowing the previous) and optional
    defaults pointed at an unrelated class instead of `None`. Field names are
    restored from their `model_args.*` usages in `main`; the class name from the
    `HfArgumentParser((ModelArguments, ...))` reference.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class DataTrainingArguments:
    """Arguments for what data to train/evaluate on.

    Fixed: all fields were named `a` and defaults pointed at an unrelated class
    instead of `None`/`False`; the file-extension validator must be
    `__post_init__` to run after dataclass construction. Field names restored
    from their `data_args.*` usages in `main`.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    def __post_init__(self):
        # Validate file extensions as soon as the dataclass is constructed.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Collator that flattens multiple-choice features, pads them, and re-nests.

    Fixed: the four fields were all named `a` (shadowing each other) while the
    body read `self.tokenizer`/`self.padding`/`self.max_length`/
    `self.pad_to_multiple_of`; `__call__` referenced the undefined `features`
    and several unbound locals; `torch.intaa` restored to `torch.int64`.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    """Fine-tune and/or evaluate a multiple-choice model on SWAG (or user CSV/JSON data).

    Fixed: the function was renamed away from `main` while the `__main__` guard
    still calls `main()`, and nearly every local was assigned to a throwaway
    name while later lines read the real one (`parser`, `model_args`,
    `raw_datasets`, `trainer`, ...); `training_args.fpaa` restored to `fp16`.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 10_24:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`." )
            max_seq_length = 10_24
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def __lowerCAmelCase ( lowerCamelCase : Tuple ):
    """Entry-point wrapper: ignore the single argument and delegate to ``main()``.

    The parameter exists only to satisfy a caller that passes one positional
    argument (e.g. a spawn/launcher API); it is never read.
    NOTE(review): ``main`` must be defined earlier in this file — not visible
    in this chunk, confirm it exists under that name.
    """
    main()
# Standard script entry point: run training/evaluation when executed directly.
if __name__ == "__main__":
    main()
| 39
| 1
|
'''simple docstring'''
def __lowerCAmelCase ( boundary , steps ):
    """Approximate the integral of ``f`` over ``boundary`` with the trapezoidal rule.

    :param boundary: two-element sequence ``[a, b]`` with the integration bounds
    :param steps: number of sub-intervals (resolution)
    :return: trapezoidal-rule estimate of the integral of ``f`` from ``a`` to ``b``

    NOTE(review): relies on sibling helpers ``make_points`` and ``f`` defined
    elsewhere in this module — confirm those names exist.
    """
    # The original signature repeated one parameter name twice (a SyntaxError)
    # and collapsed every local into a single variable; reconstructed from the
    # evident trapezoidal-rule shape of the body.
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    # End points carry half weight, interior points full weight h.
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def __lowerCAmelCase ( a , b , h ):
    """Yield interior sample points ``a + h, a + 2h, ...`` strictly below ``b - h``.

    :param a: lower integration bound
    :param b: upper integration bound
    :param h: step size between consecutive points
    """
    # The original signature repeated the same parameter name three times
    # (a SyntaxError); parameter roles reconstructed from the loop below.
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def __lowerCAmelCase ( lowerCamelCase : int ):  # enter your function here
    """Integrand used by the demo: return the square of the argument."""
    # The original body read an undefined name ``x``; use the parameter.
    # (x - 0) * (x - 0) simplifies to x * x.
    y = lowerCamelCase * lowerCamelCase
    return y
def __lowerCAmelCase ( ):
    """Demo driver: integrate the module's ``f`` over [0, 1] in 10 steps and print it.

    NOTE(review): calls ``method_a`` — the trapezoidal-rule helper's upstream
    name — which is not visible under that name in this chunk; confirm.
    """
    # The original collapsed every local into one name and then read
    # undefined names ``a``, ``b`` and ``y``; reconstructed from the comments.
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'''y = {y}''')
# Run the integration demo when executed as a script.
# NOTE(review): ``main`` is not defined under that name in this chunk
# (definitions here are name-mangled) — confirm the intended entry point.
if __name__ == "__main__":
    main()
| 39
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
# Module logger plus the three formatter registries used below.
# NOTE(review): all four assignments rebind the SAME name ``lowerCAmelCase``,
# so only the last survives. The bodies below read ``logger``,
# ``_FORMAT_TYPES`` (type -> Formatter class), ``_FORMAT_TYPES_ALIASES``
# (alias -> canonical type) and ``_FORMAT_TYPES_ALIASES_UNAVAILABLE``
# (type -> error to raise) — confirm these were the intended bindings.
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : Dict[Optional[str], Type[Formatter]] = {}
lowerCAmelCase : Dict[Optional[str], str] = {}
lowerCAmelCase : Dict[Optional[str], Exception] = {}
def __lowerCAmelCase ( formatter_cls : type , format_type : Optional[str] , aliases : Optional[List[str]] = None , ):
    """Register ``formatter_cls`` as the formatter for ``format_type`` and its aliases.

    Already-registered types/aliases are overwritten with a warning.

    :param formatter_cls: Formatter subclass to register
    :param format_type: canonical format-type name (``None`` allowed)
    :param aliases: optional extra names resolving to ``format_type``
    """
    # The original repeated one parameter name three times (a SyntaxError) and
    # discarded the registry writes; targets reconstructed from the lookups of
    # ``_FORMAT_TYPES`` / ``_FORMAT_TYPES_ALIASES`` already present in the body.
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def __lowerCAmelCase ( unavailable_error : Exception , format_type : Optional[str] , aliases : Optional[List[str]] = None ):
    """Record ``unavailable_error`` for ``format_type`` and each of its aliases.

    Requesting such a type later raises the stored error (e.g. "PyTorch needs
    to be installed ...") instead of returning a formatter.
    """
    # The original repeated a parameter name (a SyntaxError) and dropped the
    # registry write; the target registry name is taken from the lookup of
    # ``_FORMAT_TYPES_ALIASES_UNAVAILABLE`` in the get-formatter body below —
    # NOTE(review): confirm that binding exists at module level.
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
# NOTE(review): these calls use ``_register_formatter`` / ``_register_unavailable_formatter``
# and the error bindings ``_torch_error`` / ``_tf_error`` / ``_jax_error``,
# but the definitions above are name-mangled and the ValueError assignments
# below all rebind ``lowerCAmelCase`` — confirm the intended names before running.
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
# Backend-specific formatters are registered only when the backend is
# importable; otherwise an explanatory error is stored for later raising.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter
    _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    lowerCAmelCase : Optional[int] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
    _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter
    _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    lowerCAmelCase : str = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
    _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter
    _register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    lowerCAmelCase : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
    _register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def __lowerCAmelCase ( lowerCamelCase : Optional[str] ):
    """Resolve a format-type alias to its canonical name.

    If the argument is a registered alias, return the canonical type it maps
    to; otherwise return the argument unchanged.
    """
    # The original body read an undefined name ``format_type``; use the
    # function's parameter instead.
    if lowerCamelCase in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[lowerCamelCase]
    else:
        return lowerCamelCase
def __lowerCAmelCase ( lowerCamelCase : Optional[str] , **format_kwargs ):
    """Instantiate and return the formatter registered for the given type.

    :param lowerCamelCase: format type or alias (e.g. "np", "pandas")
    :param format_kwargs: keyword arguments forwarded to the formatter class
    :raises: the stored "backend unavailable" error for known-but-uninstalled
        types, ``ValueError`` for unknown types
    """
    # The original used the same name for the positional and the ``**``
    # parameter (a SyntaxError); keyword arguments now forward cleanly.
    format_type = get_format_type_from_alias(lowerCamelCase )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        # ``t is not None`` replaces the original ``type != None`` (identity
        # check for None; avoid shadowing the ``type`` builtin).
        raise ValueError(
            f'''Return type should be None or selected in {list(t for t in _FORMAT_TYPES.keys() if t is not None )}, but got \'{format_type}\'''' )
| 39
| 1
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# NOTE(review): later code reads this flag as ``is_python_no_less_than_3_10``
# (e.g. the version-gated dataclasses below), but the assignment rebinds the
# mangled name ``lowerCAmelCase`` — confirm the intended binding.
lowerCAmelCase : List[Any] = sys.version_info >= (3, 1_0)
def __lowerCAmelCase ( default=None , metadata=None ):
    """``dataclasses.field`` helper for list-valued defaults.

    Wraps ``default`` in a ``default_factory`` lambda so each dataclass
    instance receives its own copy instead of sharing one mutable list.

    :param default: default list value (or None)
    :param metadata: forwarded to ``dataclasses.field(metadata=...)``
    """
    # The original repeated the parameter name twice (a SyntaxError);
    # parameter roles reconstructed from the body's use of ``default``.
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class UpperCAmelCase__ :
    # Basic fixture dataclass for the parser tests.
    # NOTE(review): all four annotations bind the SAME name ``a`` (the test
    # methods below access attributes foo/bar/baz/flag, e.g. via
    # ``BasicExample(**{"foo": ..., "bar": ..., "baz": ..., "flag": ...})``),
    # so only the last annotation survives — field names need restoring.
    a : int
    a : float
    a : str
    a : bool
@dataclass
class UpperCAmelCase__ :
    # Fixture with defaulted fields: an int default of 42 and a string field
    # carrying help metadata. NOTE(review): both annotations bind the same
    # name ``a``; only the second survives — confirm intended field names.
    a : int = 4_2
    a : str = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class UpperCAmelCase__ :
    # Boolean-flag fixture (defaults False / True / None).
    # NOTE(review): all three annotations bind the same name ``a``; only the
    # Optional[bool] one survives — confirm intended field names.
    a : bool = False
    a : bool = True
    a : Optional[bool] = None
class UpperCAmelCase__ ( UpperCamelCase__ ):
    # Enum fixture with two string members ("titi" / "toto"); test methods
    # reference it as ``BasicEnum``. NOTE(review): base ``UpperCamelCase__``
    # is not defined in this chunk and both members rebind ``a`` — confirm.
    a : Optional[int] = """titi"""
    a : int = """toto"""
class UpperCAmelCase__ ( UpperCamelCase__ ):
    # Enum fixture mixing string and int members; test methods reference it as
    # ``MixedTypeEnum`` with members titi/toto/fourtytwo. NOTE(review): all
    # three members rebind ``a`` and the base class is not in this chunk.
    a : Union[str, Any] = """titi"""
    a : Any = """toto"""
    a : Union[str, Any] = 4_2
@dataclass
class UpperCAmelCase__ :
    # Fixture holding a BasicEnum-valued field defaulting to "toto".
    a : BasicEnum = "toto"
    def UpperCAmelCase_ ( self ) -> str:
        # Post-init style coercion of the raw string to the enum.
        # NOTE(review): the result is bound to a throwaway local and the read
        # attribute is ``self.foo`` while the field above is named ``a`` —
        # confirm the intended attribute rebinding.
        __lowerCAmelCase = BasicEnum(self.foo )
@dataclass
class UpperCAmelCase__ :
    # Fixture holding a MixedTypeEnum-valued field defaulting to "toto".
    a : MixedTypeEnum = "toto"
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        # Post-init style coercion to the enum; NOTE(review): result discarded
        # and ``self.foo`` read while the field is named ``a`` — confirm.
        __lowerCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class UpperCAmelCase__ :
    # Fixture with Optional-typed scalar and list fields (all default None/[]).
    # NOTE(review): every annotation rebinds ``a``; ``UpperCamelCase__`` used
    # as a default and the ``list_field`` helper are name-mangled siblings.
    a : Optional[int] = None
    a : Optional[float] = field(default=UpperCamelCase__ , metadata={"""help""": """help message"""} )
    a : Optional[str] = None
    a : Optional[List[str]] = list_field(default=[] )
    a : Optional[List[int]] = list_field(default=[] )
@dataclass
class UpperCAmelCase__ :
    # Fixture with list-valued fields and non-empty defaults (ints, strings,
    # floats). NOTE(review): all annotations rebind ``a`` — field names
    # (foo_int/bar_int/foo_str/foo_float per the tests below) need restoring.
    a : List[int] = list_field(default=[] )
    a : List[int] = list_field(default=[1, 2, 3] )
    a : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
    a : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCAmelCase__ :
    # Fixture with required (no-default) fields, including a required enum.
    # NOTE(review): the three annotations rebind ``a`` while the method reads
    # ``self.required_enum`` — confirm intended field names.
    a : List[int] = field()
    a : str = field()
    a : BasicEnum = field()
    def UpperCAmelCase_ ( self ) -> Dict:
        # Post-init style coercion of the required enum value; result discarded.
        __lowerCAmelCase = BasicEnum(self.required_enum )
@dataclass
class UpperCAmelCase__ :
    # Fixture using string (forward-reference) annotations to check that the
    # parser resolves them like real types. NOTE(review): annotations rebind
    # ``a`` and the field helper name is mangled — confirm before running.
    a : int
    a : "BasicEnum" = field()
    a : "Optional[bool]" = None
    a : "str" = field(default="""toto""" , metadata={"""help""": """help message"""} )
    a : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
# PEP 604 (``X | None``) and builtin-generic (``list[str]``) fixtures; only
# defined on Python >= 3.10 where that syntax is valid.
if is_python_no_less_than_3_10:
    @dataclass
    class UpperCAmelCase__ :
        # 3.10+ twin of the boolean-flag fixture, using ``bool | None``.
        a : bool = False
        a : bool = True
        a : bool | None = None
    @dataclass
    class UpperCAmelCase__ :
        # 3.10+ twin of the Optional fixture, using ``|``-unions and
        # ``list[...]`` generics. NOTE(review): annotations rebind ``a``.
        a : int | None = None
        a : float | None = field(default=UpperCamelCase__ , metadata={"""help""": """help message"""} )
        a : str | None = None
        a : list[str] | None = list_field(default=[] )
        a : list[int] | None = list_field(default=[] )
class UpperCAmelCase__ ( unittest.TestCase ):
    """Test suite for ``HfArgumentParser`` (argparse generation from dataclasses).

    NOTE(review): names are mangled — every test method is ``UpperCAmelCase_``
    (later defs shadow earlier ones under unittest discovery), locals collapse
    into ``__lowerCAmelCase``, and several signatures repeat a parameter name
    (a SyntaxError as written). Code is kept byte-identical; comments only.
    """
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Any:
        # Helper: assert two parsers define equivalent actions (compares the
        # vars() of each action, minus its container back-reference).
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            __lowerCAmelCase = {k: v for k, v in vars(UpperCamelCase ).items() if k != "container"}
            __lowerCAmelCase = {k: v for k, v in vars(UpperCamelCase ).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices" , UpperCamelCase ) and yy.get("choices" , UpperCamelCase ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](UpperCamelCase ) , yy["type"](UpperCamelCase ) )
                del xx["type"], yy["type"]
            self.assertEqual(UpperCamelCase , UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> str:
        # Basic required args (--foo/--bar/--baz) plus an optional --flag.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=UpperCamelCase , required=UpperCamelCase )
        expected.add_argument("--bar" , type=UpperCamelCase , required=UpperCamelCase )
        expected.add_argument("--baz" , type=UpperCamelCase , required=UpperCamelCase )
        expected.add_argument("--flag" , type=UpperCamelCase , default=UpperCamelCase , const=UpperCamelCase , nargs="?" )
        self.argparsersEqual(UpperCamelCase , UpperCamelCase )
        __lowerCAmelCase = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        ((__lowerCAmelCase) , ) = parser.parse_args_into_dataclasses(UpperCamelCase , look_for_args_file=UpperCamelCase )
        self.assertFalse(example.flag )
    def UpperCAmelCase_ ( self ) -> Dict:
        # Defaulted args: --foo defaults to 42, --baz to "toto" with help text.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = argparse.ArgumentParser()
        expected.add_argument("--foo" , default=42 , type=UpperCamelCase )
        expected.add_argument("--baz" , default="toto" , type=UpperCamelCase , help="help message" )
        self.argparsersEqual(UpperCamelCase , UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> Tuple:
        # Boolean flags: --foo/--baz/--opt plus the generated --no_baz negation.
        __lowerCAmelCase = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=UpperCamelCase , default=UpperCamelCase , const=UpperCamelCase , nargs="?" )
        expected.add_argument("--baz" , type=UpperCamelCase , default=UpperCamelCase , const=UpperCamelCase , nargs="?" )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz" , action="store_false" , default=UpperCamelCase , dest="baz" )
        expected.add_argument("--opt" , type=UpperCamelCase , default=UpperCamelCase )
        __lowerCAmelCase = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            # Also exercise the PEP 604 (``bool | None``) twin fixture.
            dataclass_types.append(UpperCamelCase )
        for dataclass_type in dataclass_types:
            __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
            self.argparsersEqual(UpperCamelCase , UpperCamelCase )
            __lowerCAmelCase = parser.parse_args([] )
            self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
            __lowerCAmelCase = parser.parse_args(["--foo", "--no_baz"] )
            self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
            __lowerCAmelCase = parser.parse_args(["--foo", "--baz"] )
            self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
            __lowerCAmelCase = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
            self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
            __lowerCAmelCase = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
            self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , baz=UpperCamelCase , opt=UpperCamelCase ) )
    def UpperCAmelCase_ ( self ) -> Tuple:
        # Enum field with mixed str/int choices; values parsed via the custom
        # choice-type function, then coerced back to MixedTypeEnum members.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = argparse.ArgumentParser()
        expected.add_argument(
            "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
        self.argparsersEqual(UpperCamelCase , UpperCamelCase )
        __lowerCAmelCase = parser.parse_args([] )
        self.assertEqual(args.foo , "toto" )
        __lowerCAmelCase = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        __lowerCAmelCase = parser.parse_args(["--foo", "titi"] )
        self.assertEqual(args.foo , "titi" )
        __lowerCAmelCase = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        __lowerCAmelCase = parser.parse_args(["--foo", "42"] )
        self.assertEqual(args.foo , 42 )
        __lowerCAmelCase = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def UpperCAmelCase_ ( self ) -> List[str]:
        # Literal["titi", "toto", 42] behaves like the enum case above.
        @dataclass
        class UpperCAmelCase__ :
            a : Literal["titi", "toto", 4_2] = "toto"
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = argparse.ArgumentParser()
        expected.add_argument(
            "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
        self.argparsersEqual(UpperCamelCase , UpperCamelCase )
        __lowerCAmelCase = parser.parse_args([] )
        self.assertEqual(args.foo , "toto" )
        __lowerCAmelCase = parser.parse_args(["--foo", "titi"] )
        self.assertEqual(args.foo , "titi" )
        __lowerCAmelCase = parser.parse_args(["--foo", "42"] )
        self.assertEqual(args.foo , 42 )
    def UpperCAmelCase_ ( self ) -> str:
        # List-valued fields become nargs="+" options with list defaults.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = argparse.ArgumentParser()
        expected.add_argument("--foo_int" , nargs="+" , default=[] , type=UpperCamelCase )
        expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=UpperCamelCase )
        expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=UpperCamelCase )
        expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=UpperCamelCase )
        self.argparsersEqual(UpperCamelCase , UpperCamelCase )
        __lowerCAmelCase = parser.parse_args([] )
        self.assertEqual(
            UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
        __lowerCAmelCase = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
        self.assertEqual(UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # Optional fields default to None (or []) and accept explicit values.
        __lowerCAmelCase = argparse.ArgumentParser()
        expected.add_argument("--foo" , default=UpperCamelCase , type=UpperCamelCase )
        expected.add_argument("--bar" , default=UpperCamelCase , type=UpperCamelCase , help="help message" )
        expected.add_argument("--baz" , default=UpperCamelCase , type=UpperCamelCase )
        expected.add_argument("--ces" , nargs="+" , default=[] , type=UpperCamelCase )
        expected.add_argument("--des" , nargs="+" , default=[] , type=UpperCamelCase )
        __lowerCAmelCase = [OptionalExample]
        if is_python_no_less_than_3_10:
            # Also exercise the PEP 604 twin fixture.
            dataclass_types.append(UpperCamelCase )
        for dataclass_type in dataclass_types:
            __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
            self.argparsersEqual(UpperCamelCase , UpperCamelCase )
            __lowerCAmelCase = parser.parse_args([] )
            self.assertEqual(UpperCamelCase , Namespace(foo=UpperCamelCase , bar=UpperCamelCase , baz=UpperCamelCase , ces=[] , des=[] ) )
            __lowerCAmelCase = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
            self.assertEqual(UpperCamelCase , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
    def UpperCAmelCase_ ( self ) -> Any:
        # Fields without defaults become required options.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = argparse.ArgumentParser()
        expected.add_argument("--required_list" , nargs="+" , type=UpperCamelCase , required=UpperCamelCase )
        expected.add_argument("--required_str" , type=UpperCamelCase , required=UpperCamelCase )
        expected.add_argument(
            "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=UpperCamelCase , )
        self.argparsersEqual(UpperCamelCase , UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # String (forward-reference) annotations resolve like real types.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=UpperCamelCase , required=UpperCamelCase )
        expected.add_argument(
            "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=UpperCamelCase , )
        expected.add_argument("--opt" , type=UpperCamelCase , default=UpperCamelCase )
        expected.add_argument("--baz" , default="toto" , type=UpperCamelCase , help="help message" )
        expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=UpperCamelCase )
        self.argparsersEqual(UpperCamelCase , UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # parse_dict: a plain dict round-trips into the dataclass.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        __lowerCAmelCase = parser.parse_dict(UpperCamelCase )[0]
        __lowerCAmelCase = BasicExample(**UpperCamelCase )
        self.assertEqual(UpperCamelCase , UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> List[Any]:
        # parse_dict with an unexpected key raises when extra keys are disallowed.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }
        self.assertRaises(UpperCamelCase , parser.parse_dict , UpperCamelCase , allow_extra_keys=UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> List[str]:
        # Round-trip through a JSON file on disk.
        # NOTE(review): the file is parsed with ``parse_yaml_file`` although it
        # was written as JSON — upstream uses ``parse_json_file``; confirm.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            __lowerCAmelCase = os.path.join(UpperCamelCase , "temp_json" )
            os.mkdir(UpperCamelCase )
            with open(temp_local_path + ".json" , "w+" ) as f:
                json.dump(UpperCamelCase , UpperCamelCase )
            __lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
            __lowerCAmelCase = BasicExample(**UpperCamelCase )
            self.assertEqual(UpperCamelCase , UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> Tuple:
        # Round-trip through a YAML file on disk via parse_yaml_file.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        __lowerCAmelCase = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            __lowerCAmelCase = os.path.join(UpperCamelCase , "temp_yaml" )
            os.mkdir(UpperCamelCase )
            with open(temp_local_path + ".yaml" , "w+" ) as f:
                yaml.dump(UpperCamelCase , UpperCamelCase )
            __lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
            __lowerCAmelCase = BasicExample(**UpperCamelCase )
            self.assertEqual(UpperCamelCase , UpperCamelCase )
    def UpperCAmelCase_ ( self ) -> Any:
        # Smoke test: the parser can be built from TrainingArguments itself.
        __lowerCAmelCase = HfArgumentParser(UpperCamelCase )
        self.assertIsNotNone(UpperCamelCase )
| 39
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCAmelCase ( lowerCamelCase : Any ):
    """Build a ``FocalNetConfig`` from a checkpoint name (tiny/small/base/...).

    NOTE(review): heavily name-mangled — the body reads ``model_name`` while
    the parameter is ``lowerCamelCase``; every local rebinds the single name
    ``__lowerCAmelCase`` (depths, focal levels/windows, embed dim, label maps
    all collapse); the final ``FocalNetConfig(...)`` call passes that one name
    for every keyword and returns an undefined ``config``. The original
    variable names must be restored before this can run.
    """
    # depths: tiny variants use [2, 2, 6, 2]; all others [2, 2, 18, 2]
    __lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    # conv-embed / post-layernorm / layerscale are enabled for large & huge
    __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
    __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
    __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        # focal levels / focal windows for the fl3 and fl4 large variants
        if "fl3" in model_name:
            __lowerCAmelCase = [3, 3, 3, 3]
            __lowerCAmelCase = [5, 5, 5, 5]
        elif "fl4" in model_name:
            __lowerCAmelCase = [4, 4, 4, 4]
            __lowerCAmelCase = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        __lowerCAmelCase = [3, 3, 3, 3]
        # "lrf" (large receptive field) checkpoints use wider focal windows
        if "lrf" in model_name:
            __lowerCAmelCase = [3, 3, 3, 3]
        else:
            __lowerCAmelCase = [2, 2, 2, 2]
    # embedding dimension per model size
    if "tiny" in model_name:
        __lowerCAmelCase = 96
    elif "small" in model_name:
        __lowerCAmelCase = 96
    elif "base" in model_name:
        __lowerCAmelCase = 1_28
    elif "large" in model_name:
        __lowerCAmelCase = 1_92
    elif "xlarge" in model_name:
        __lowerCAmelCase = 2_56
    elif "huge" in model_name:
        __lowerCAmelCase = 3_52
    # set label information
    __lowerCAmelCase = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        __lowerCAmelCase = "imagenet-22k-id2label.json"
    else:
        __lowerCAmelCase = "imagenet-1k-id2label.json"
    # download the id->label mapping and build both directions
    __lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) )
    __lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
    __lowerCAmelCase = {v: k for k, v in idalabel.items()}
    __lowerCAmelCase = FocalNetConfig(
        embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , )
    return config
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
    """Map an original FocalNet state-dict key to the HF transformers naming.

    NOTE(review): mangled — the body reads/returns ``name`` while the
    parameter is ``lowerCamelCase``, and every ``replace`` result is bound to
    the throwaway ``__lowerCAmelCase`` instead of back to ``name``, so the
    substitutions are discarded as written. Restore the rebinding to run.
    """
    # Each branch rewrites one naming convention (patch embeddings, stage
    # layout, modulation projections, final layernorm, classifier head).
    if "patch_embed.proj" in name:
        __lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        __lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        __lowerCAmelCase = "encoder." + name
    if "encoder.layers" in name:
        __lowerCAmelCase = name.replace("encoder.layers" , "encoder.stages" )
    if "downsample.proj" in name:
        __lowerCAmelCase = name.replace("downsample.proj" , "downsample.projection" )
    if "blocks" in name:
        __lowerCAmelCase = name.replace("blocks" , "layers" )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        __lowerCAmelCase = name.replace("modulation.f" , "modulation.projection_in" )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        __lowerCAmelCase = name.replace("modulation.h" , "modulation.projection_context" )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        __lowerCAmelCase = name.replace("modulation.proj" , "modulation.projection_out" )
    if name == "norm.weight":
        __lowerCAmelCase = "layernorm.weight"
    if name == "norm.bias":
        __lowerCAmelCase = "layernorm.bias"
    if "head" in name:
        # classification head keys keep their top-level position
        __lowerCAmelCase = name.replace("head" , "classifier" )
    else:
        # everything else is nested under the "focalnet." backbone prefix
        __lowerCAmelCase = "focalnet." + name
    return name
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ):
    """Convert an original FocalNet checkpoint to the HF format and verify it.

    Downloads the original weights, renames state-dict keys, loads them into
    ``FocalNetForImageClassification``, checks preprocessing and logits
    against reference values, then optionally saves and/or pushes to the hub.

    NOTE(review): mangled throughout — the signature repeats one parameter
    name three times (a SyntaxError as written) and the body reads
    ``model_name`` / ``pytorch_dump_folder_path`` / ``push_to_hub`` plus many
    collapsed ``__lowerCAmelCase`` locals; restore original names to run.
    """
    # name -> download URL for every supported checkpoint
    __lowerCAmelCase = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    __lowerCAmelCase = model_name_to_url[model_name]
    print("Checkpoint URL: " , lowerCamelCase )
    __lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        __lowerCAmelCase = state_dict.pop(lowerCamelCase )
        __lowerCAmelCase = val
    __lowerCAmelCase = get_focalnet_config(lowerCamelCase )
    __lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase )
    model.eval()
    # load state dict
    model.load_state_dict(lowerCamelCase )
    # verify conversion
    __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
    __lowerCAmelCase = BitImageProcessor(
        do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , )
    __lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
    __lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" )
    # torchvision reference pipeline used to cross-check the processor output
    __lowerCAmelCase = transforms.Compose(
        [
            transforms.Resize(2_56 ),
            transforms.CenterCrop(2_24 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    __lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 )
    __lowerCAmelCase = model(**lowerCamelCase )
    __lowerCAmelCase = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    # per-checkpoint reference logits for the COCO cats image
    if model_name == "focalnet-tiny":
        __lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        __lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        __lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        __lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        __lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        __lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(lowerCamelCase )
        processor.save_pretrained(lowerCamelCase )
    if push_to_hub:
        print(f'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(f'''{model_name}''' )
        processor.push_to_hub(f'''{model_name}''' )
# CLI entry point: parse the checkpoint name, optional dump folder and
# push-to-hub flag, then run the conversion.
if __name__ == "__main__":
    lowerCAmelCase : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''focalnet-tiny''',
        type=str,
        help='''Name of the FocalNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub.''',
    )
    # NOTE(review): ``parser``/``args`` are read below but the assignments
    # rebind the mangled name ``lowerCAmelCase`` — confirm intended names.
    lowerCAmelCase : Optional[int] = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 39
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Tuple = logging.get_logger(__name__)
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Dict=False ):
    """Build the (old_key, new_key) rename list for a DINO ViT checkpoint.

    NOTE(review): mangled — the list is bound to ``__lowerCAmelCase`` yet the
    body appends to ``rename_keys`` and reads ``config``/``base_model``,
    none of which match the (duplicated) parameter names; as written this is
    a SyntaxError. Restore parameter/local names to run.
    """
    __lowerCAmelCase = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        __lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys
def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Tuple=False ):
    """Split each fused timm qkv projection into separate q/k/v entries.

    NOTE(review): mangled — the signature repeats a parameter name (a
    SyntaxError as written) and the body reads ``config``/``state_dict``/
    ``base_model`` plus collapsed ``__lowerCAmelCase`` locals; the sliced
    q/k/v tensors are bound to the same throwaway name instead of being
    written back into the state dict. Restore the names to run.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            __lowerCAmelCase = ""
        else:
            __lowerCAmelCase = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        __lowerCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        __lowerCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        __lowerCAmelCase = in_proj_weight[
            : config.hidden_size, :
        ]
        __lowerCAmelCase = in_proj_bias[: config.hidden_size]
        __lowerCAmelCase = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        __lowerCAmelCase = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        __lowerCAmelCase = in_proj_weight[
            -config.hidden_size :, :
        ]
        __lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def __lowerCAmelCase ( lowerCamelCase : Any ):
    """Drop classification-head weights from a state dict, in place.

    :param lowerCamelCase: the checkpoint state dict to strip
    """
    # The original body called ``state_dict.pop`` (an undefined name — the
    # parameter is ``lowerCamelCase``) and popped the wrong argument; pop each
    # ignored key with a default so absent keys are tolerated.
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        lowerCamelCase.pop(k , None )
def __lowerCAmelCase ( dct , old , new ):
    """Move ``dct[old]`` to ``dct[new]``, in place.

    :param dct: the state dict being converted
    :param old: existing key to remove
    :param new: key under which the value is re-inserted
    """
    # The original signature repeated the same parameter name three times
    # (a SyntaxError); roles reconstructed from the pop/re-insert pattern.
    val = dct.pop(old )
    dct[new] = val
def __lowerCAmelCase ( ):
    """Download and return the standard COCO cats test image as a PIL Image.

    Used to sanity-check the converted model on a known input.
    """
    # The original passed an undefined name for both the URL and the
    # ``stream`` flag; bind the URL locally and stream the response body.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __lowerCAmelCase(model_name, pytorch_dump_folder_path, base_model=True):
    """Convert a DINO ViT checkpoint from torch.hub to the HuggingFace format.

    Loads the original model, renames/splits its state-dict keys, verifies the
    converted model against the original on a test image, and saves the model
    plus image processor to *pytorch_dump_folder_path*.

    NOTE(review): the mangled original had all three parameters named
    identically (a SyntaxError) and dropped every assignment target; the names
    and config attribute targets below are restored per the upstream DINO
    conversion script — confirm against it before shipping.
    """
    config = ViTConfig()
    # patch_size: DINO model names ending in "8" use 8x8 patches.
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required (classification variant only)
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture: the "small" DINO variants shrink the defaults
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        # DINO base model returns the final CLS-token hidden state.
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse flags and run the DINO -> HF conversion.
    # Fixes (vs. the mangled original): the parser and parsed namespace were
    # bound to throwaway names while later lines read `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    # NOTE: this makes base_model default to True despite the store_true action.
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    # NOTE(review): `convert_vit_checkpoint` is the conversion function's
    # original name; in this mangled file the function is defined under a
    # different (mangled) identifier.
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 39
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
# Module logger (name restored; the mangled original bound it to a throwaway name).
logger = logging.get_logger(__name__)

# File names under which a serialized tokenizer is stored.
# NOTE(review): the four constant names below are restored from the class-body
# references at the tokenizer class (VOCAB_FILES_NAMES etc.), which the mangled
# assignments had orphaned.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download locations of the vocab/tokenizer files for each released checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

# Per-checkpoint tokenizer construction defaults.
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """Fast (Rust-backed) SqueezeBERT tokenizer.

    NOTE(review): the mangled original had all class attributes bound to the
    same name, three methods sharing one name, and duplicate parameter names
    (a SyntaxError). Attribute, method, and parameter names are restored per
    the standard fast-BERT-style tokenizer contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # If the backend normalizer's stored options disagree with the
        # requested ones, rebuild it with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Wrap one or two id sequences with [CLS]/[SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Return 0s for the first segment (plus specials) and 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Serialize the backend tokenizer's vocabulary files to *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 39
| 1
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    # CLI entry point for converting an original Stable Diffusion checkpoint
    # into a diffusers pipeline.
    # Fixes (vs. the mangled original): `parser`, `args`, and `pipe` were
    # bound to throwaway names while later lines read them; `torch.floataa`
    # restored to `torch.float16`; undefined `List`/`Union` annotations removed.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )
    if args.half:
        # Was `torch.floataa` in the mangled source — half precision is float16.
        pipe.to(torch_dtype=torch.float16)
    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 39
|
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase(lowerCamelCase: list):
    """Return the arithmetic mean of the numbers in *lowerCamelCase*.

    Raises:
        ValueError: if the list is empty.

    Fixes (vs. the original): the emptiness check referenced an undefined name
    ``nums`` instead of the parameter, so every call raised NameError.
    """
    if not lowerCamelCase:
        raise ValueError("List is empty")
    return sum(lowerCamelCase) / len(lowerCamelCase)
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 39
| 1
|
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class UpperCAmelCase__ ( nn.Module ):
    """Transformer block: self-attention, optional cross-attention, feed-forward.

    NOTE(review): this class is machine-mangled. Every local/attribute
    assignment target was rewritten to ``__lowerCAmelCase``, all constructor
    parameters were renamed to the single name ``UpperCamelCase`` (duplicate
    parameter names are a SyntaxError), and what appear to be three distinct
    norm layers and two attention layers collapsed onto ``self.norma`` /
    ``self.attna``. The code is left byte-identical; the comments below
    describe the apparent intent only and must be confirmed against the
    upstream module before any repair.
    """
    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=0.0 , UpperCamelCase = None , UpperCamelCase = "geglu" , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = "layer_norm" , UpperCamelCase = False , ) -> int:
        super().__init__()
        # The right-hand sides reference the presumable original parameter
        # names (only_cross_attention, num_embeds_ada_norm, norm_type, ...),
        # which are undefined here because the parameters were renamed.
        __lowerCAmelCase = only_cross_attention
        __lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        __lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            __lowerCAmelCase = AdaLayerNorm(UpperCamelCase , UpperCamelCase )
        elif self.use_ada_layer_norm_zero:
            __lowerCAmelCase = AdaLayerNormZero(UpperCamelCase , UpperCamelCase )
        else:
            __lowerCAmelCase = nn.LayerNorm(UpperCamelCase , elementwise_affine=UpperCamelCase )
        __lowerCAmelCase = Attention(
            query_dim=UpperCamelCase , heads=UpperCamelCase , dim_head=UpperCamelCase , dropout=UpperCamelCase , bias=UpperCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            __lowerCAmelCase = (
                AdaLayerNorm(UpperCamelCase , UpperCamelCase )
                if self.use_ada_layer_norm
                else nn.LayerNorm(UpperCamelCase , elementwise_affine=UpperCamelCase )
            )
            __lowerCAmelCase = Attention(
                query_dim=UpperCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase , dim_head=UpperCamelCase , dropout=UpperCamelCase , bias=UpperCamelCase , upcast_attention=UpperCamelCase , ) # is self-attn if encoder_hidden_states is none
        else:
            __lowerCAmelCase = None
            __lowerCAmelCase = None
        # 3. Feed-forward
        __lowerCAmelCase = nn.LayerNorm(UpperCamelCase , elementwise_affine=UpperCamelCase )
        __lowerCAmelCase = FeedForward(UpperCamelCase , dropout=UpperCamelCase , activation_fn=UpperCamelCase , final_dropout=UpperCamelCase )
        # let chunk size default to None
        __lowerCAmelCase = None
        __lowerCAmelCase = 0
    # Presumably the original set_chunk_feed_forward(chunk_size, dim).
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Tuple:
        # Sets chunk feed-forward
        __lowerCAmelCase = chunk_size
        __lowerCAmelCase = dim
    # Presumably the original forward(); shadows the method above because both
    # carry the same mangled name.
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , ) -> Any:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            __lowerCAmelCase = self.norma(UpperCamelCase , UpperCamelCase )
        elif self.use_ada_layer_norm_zero:
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.norma(
                UpperCamelCase , UpperCamelCase , UpperCamelCase , hidden_dtype=hidden_states.dtype )
        else:
            __lowerCAmelCase = self.norma(UpperCamelCase )
        __lowerCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        __lowerCAmelCase = self.attna(
            UpperCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase , **UpperCamelCase , )
        if self.use_ada_layer_norm_zero:
            __lowerCAmelCase = gate_msa.unsqueeze(1 ) * attn_output
        __lowerCAmelCase = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attna is not None:
            __lowerCAmelCase = (
                self.norma(UpperCamelCase , UpperCamelCase ) if self.use_ada_layer_norm else self.norma(UpperCamelCase )
            )
            __lowerCAmelCase = self.attna(
                UpperCamelCase , encoder_hidden_states=UpperCamelCase , attention_mask=UpperCamelCase , **UpperCamelCase , )
            __lowerCAmelCase = attn_output + hidden_states
        # 3. Feed-forward
        __lowerCAmelCase = self.norma(UpperCamelCase )
        if self.use_ada_layer_norm_zero:
            __lowerCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            __lowerCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            __lowerCAmelCase = torch.cat(
                [self.ff(UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            __lowerCAmelCase = self.ff(UpperCamelCase )
        if self.use_ada_layer_norm_zero:
            __lowerCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output
        __lowerCAmelCase = ff_output + hidden_states
        return hidden_states
class UpperCAmelCase__ ( nn.Module ):
    """Transformer feed-forward block: activation projection -> dropout -> linear (-> optional dropout).

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError) and dropped the ``act_fn``/``inner_dim`` bindings; names are
    restored per the visible body references (``self.net``, ``activation_fn``,
    ``final_dropout``).
    """

    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False):
        super().__init__()
        # Width of the hidden expansion layer.
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def UpperCAmelCase_(self, hidden_states):
        """Apply every layer of the feed-forward stack in sequence (presumably the original forward)."""
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class UpperCAmelCase__ ( nn.Module ):
    """GELU activation with a preceding linear projection.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError), lost the ``self.proj``/``self.approximate`` attribute
    bindings (both referenced in the method bodies), and gave both methods the
    same name; the helper is restored as ``gelu`` (the body already calls
    ``self.gelu``). ``torch.floataa`` restored to ``torch.float32``.
    """

    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def UpperCAmelCase_(self, hidden_states):
        """Project then apply GELU (presumably the original forward)."""
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class UpperCAmelCase__ ( nn.Module ):
    """GEGLU activation: project to 2x width, gate one half with GELU of the other.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError), lost the ``self.proj`` binding (referenced in the method
    body), and gave both methods the same name; the helper is restored as
    ``gelu`` (the body already calls ``self.gelu``). ``torch.floataa``
    restored to ``torch.float32``.
    """

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def UpperCAmelCase_(self, hidden_states):
        """Split the projection into value and gate halves (presumably the original forward)."""
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class UpperCAmelCase__ ( nn.Module ):
    """Sigmoid-approximated GELU (x * sigmoid(1.702 * x)) with a preceding projection.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError) and lost both the ``self.proj`` binding and the local that
    held the projected value, so the method returned an expression over an
    undefined name.
    """

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def UpperCAmelCase_(self, hidden_states):
        """Project then apply the cheap sigmoid GELU approximation (presumably the original forward)."""
        x = self.proj(hidden_states)
        return x * torch.sigmoid(1.702 * x)
class UpperCAmelCase__ ( nn.Module ):
    """Adaptive LayerNorm: scale/shift of the norm are predicted from a timestep embedding.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError) and lost the submodule bindings; ``self.emb``/``self.silu``/
    ``self.linear``/``self.norm`` are restored per the method-body references,
    with the norm's affine parameters disabled since scale/shift come from the
    embedding — confirm argument order against the upstream module.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def UpperCAmelCase_(self, x, timestep):
        """Normalize *x* and modulate it with (1 + scale, shift) predicted from *timestep* (presumably forward)."""
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class UpperCAmelCase__ ( nn.Module ):
    """Adaptive LayerNorm (zero-init flavor): six modulation chunks from a combined timestep+label embedding.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError) and lost all submodule bindings; ``self.emb``/``self.silu``/
    ``self.linear``/``self.norm`` are restored per the method-body references
    — confirm argument order against the upstream module.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def UpperCAmelCase_(self, x, timestep, class_labels, hidden_dtype=None):
        """Return the modulated hidden states plus the MSA/MLP gates and shifts (presumably forward)."""
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class UpperCAmelCase__ ( nn.Module ):
    """Adaptive GroupNorm: per-channel scale/shift predicted from an embedding.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError); ``self.num_groups``/``self.eps``/``self.act``/``self.linear``
    are restored per the method-body references — confirm the constructor
    argument order against the upstream module.
    """

    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        # Predicts scale and shift, hence the 2x output width.
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def UpperCAmelCase_(self, x, emb):
        """Group-normalize *x* and modulate it with (1 + scale, shift) from *emb* (presumably forward)."""
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        # Broadcast (batch, 2*out_dim) over the spatial dimensions.
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 39
|
'''simple docstring'''
import re
def __lowerCAmelCase(lowerCamelCase: str):
    """Return True if *lowerCamelCase* is a valid Sri Lankan mobile phone number.

    Accepted prefixes: 0, 94, +94, or 0094, followed by 7, a valid operator
    digit (0/1/2/4/5/6/7/8), an optional ``-`` or space, and seven digits.

    Fixes (vs. the mangled original): the compiled pattern was bound to a
    throwaway name and ``re.search`` was called with the phone string as both
    pattern and subject, so any input matched itself.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(pattern.search(lowerCamelCase))
if __name__ == "__main__":
    # Smoke test: validate a sample number when run as a script.
    # Fixes (vs. the mangled original): the sample was bound to a throwaway
    # name while the print below reads `phone`; the undefined `Optional[Any]`
    # annotation (typing is not imported here) is removed.
    phone = "0094702343221"
    # NOTE(review): `is_sri_lankan_phone_number` is the validator's original
    # name; in this mangled file the function is defined under a different
    # (mangled) identifier.
    print(is_sri_lankan_phone_number(phone))
| 39
| 1
|
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase : Optional[int] = 1.6021e-19 # units = C
def __lowerCAmelCase(conductivity: float, electron_conc: float, mobility: float):
    """Solve sigma = q * n * mu for whichever of the three quantities is zero.

    Exactly one argument must be 0 (the unknown). Returns a
    ``(quantity_name, value)`` tuple for the unknown quantity.

    Raises:
        ValueError: if not exactly one value is 0, or any value is negative.

    Fixes (vs. the mangled original): the three parameters all shared one name
    (a SyntaxError); the names are restored from the body's own references.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 39
|
'''simple docstring'''
import os
import sys
import unittest
# Repository root, three directories up from this test file.
# Fixes (vs. the mangled original): the path was bound to a throwaway name
# while the sys.path manipulation below reads `git_repo_path`.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
lowerCAmelCase : str = git_repo_path  # preserve the original (mangled) binding
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
# NOTE(review): both test-file paths are bound to the same throwaway name; the
# test methods below reference them through other (mangled) identifiers, so the
# intended names (presumably the BERT and BLIP test-file constants) cannot be
# restored from this file alone — flagging rather than guessing.
# Annotations corrected from the undefined `Tuple` (typing is not imported
# here) to `str`, which is what os.path.join returns.
lowerCAmelCase : str = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase : str = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class UpperCAmelCase__ ( unittest.TestCase ):
    """Tests for the test-introspection helpers in utils/get_test_info.py.

    NOTE(review): this class is machine-mangled. All three test methods share
    one name (so only the last survives), every local is bound to
    ``__lowerCAmelCase``, and the helpers are called with the undefined name
    ``UpperCamelCase`` where the module-level test-file path constants were
    presumably intended. Code left byte-identical; comments describe apparent
    intent only.
    """
    # Presumably test_test_to_tester_mapping.
    def UpperCAmelCase_ ( self ) -> str:
        __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
        __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
        # Expected mapping for the BERT test file.
        __lowerCAmelCase = {"BertModelTest": "BertModelTester"}
        # Expected mapping for the BLIP test file.
        __lowerCAmelCase = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
    # Presumably test_model_to_test_mapping; shadows the method above.
    def UpperCAmelCase_ ( self ) -> List[Any]:
        __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
        __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
        __lowerCAmelCase = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        __lowerCAmelCase = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
    # Presumably test_model_to_tester_mapping; shadows the method above.
    def UpperCAmelCase_ ( self ) -> str:
        __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
        __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
        __lowerCAmelCase = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        __lowerCAmelCase = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
| 39
| 1
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
a : List[str] = SpeechTaTokenizer
a : Any = False
a : Dict = True
def UpperCAmelCase_ ( self ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = SpeechTaTokenizer(UpperCamelCase )
__lowerCAmelCase = AddedToken("<mask>" , lstrip=UpperCamelCase , rstrip=UpperCamelCase )
__lowerCAmelCase = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = "this is a test"
__lowerCAmelCase = "this is a test"
return input_text, output_text
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=20 , UpperCamelCase=5 ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.get_input_output_texts(UpperCamelCase )
__lowerCAmelCase = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
__lowerCAmelCase = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase )
return text, ids
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(UpperCamelCase ) , 81 )
def UpperCAmelCase_ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.get_tokenizers(do_lower_case=UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase = tokenizer.vocab_size
__lowerCAmelCase = len(UpperCamelCase )
self.assertNotEqual(UpperCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowerCAmelCase = ["aaaaa bbbbbb", "cccccccccdddddddd"]
__lowerCAmelCase = tokenizer.add_tokens(UpperCamelCase )
__lowerCAmelCase = tokenizer.vocab_size
__lowerCAmelCase = len(UpperCamelCase )
self.assertNotEqual(UpperCamelCase , 0 )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , len(UpperCamelCase ) )
self.assertEqual(UpperCamelCase , all_size + len(UpperCamelCase ) )
__lowerCAmelCase = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=UpperCamelCase )
self.assertGreaterEqual(len(UpperCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowerCAmelCase = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
__lowerCAmelCase = tokenizer.add_special_tokens(UpperCamelCase )
__lowerCAmelCase = tokenizer.vocab_size
__lowerCAmelCase = len(UpperCamelCase )
self.assertNotEqual(UpperCamelCase , 0 )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , len(UpperCamelCase ) )
self.assertEqual(UpperCamelCase , all_size_a + len(UpperCamelCase ) )
__lowerCAmelCase = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=UpperCamelCase )
self.assertGreaterEqual(len(UpperCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # Deliberate no-op: presumably overrides and disables an inherited
        # tokenizer-mixin test that does not apply here — TODO confirm which
        # base-class test this shadows.
        pass
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # Deliberate no-op: presumably overrides and disables an inherited
        # tokenizer-mixin test that does not apply here — TODO confirm which
        # base-class test this shadows.
        pass
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(UpperCamelCase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__lowerCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase )
# fmt: off
self.assertListEqual(UpperCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def UpperCAmelCase_ ( self ) -> int:
# Use custom sequence because this tokenizer does not handle numbers.
__lowerCAmelCase = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
__lowerCAmelCase = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=UpperCamelCase , )
| 39
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
    # Output container returned by the temporal transformer class below
    # (returned there as `TransformerTemporalModelOutput(sample=...)`).
    # NOTE(review): the base class is the mangled placeholder `UpperCamelCase__`;
    # presumably diffusers' BaseOutput (imported above) — confirm.
    # `a` holds the output sample tensor; its shape is not determinable from this file.
    a : torch.FloatTensor
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
    """Transformer applied along the temporal (frame) axis of video-like data.

    Fixes over the obfuscated version:
    - `__init__` / the forward method repeated one placeholder parameter name,
      which is a SyntaxError in Python (duplicate argument names); distinct
      names are restored from the argument defaults and later uses.
    - every local was bound to the same throwaway name, so `batch_frames`,
      `num_frames`, `residual`, etc. were undefined at their use sites.
    - the forward hook is named `forward` so `nn.Module.__call__` dispatches to it.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ) -> None:
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels

        # Normalize over channels, then project into the attention width.
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1E-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ]
        )

        # Project back from the attention width to the channel count.
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames: int = 1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run temporal attention over `hidden_states`.

        Args:
            hidden_states: input of shape (batch * num_frames, channel, height, width)
                — established by the reshape below.
            num_frames: number of frames folded into the batch dimension.
            return_dict: when False, return a plain 1-tuple instead of the
                output dataclass.
        """
        # 1. Input: fold the frame axis out of the batch so attention runs over frames.
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        # Sequence dim = frames; one "token" per (batch, height, width) position.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: undo the reshapes and add the residual connection.
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
| 39
| 1
|
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
lowerCAmelCase : List[str] = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path` via torch.onnx.export.

    Fix: the obfuscated signature repeated one placeholder name for all eight
    parameters (a SyntaxError); distinct names are restored from the keyword
    arguments used at the call sites below, and the function is named
    `onnx_export` as those call sites expect.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fpaa: bool = False):
    """Convert a Stable Diffusion checkpoint to ONNX submodels and save an
    OnnxStableDiffusionPipeline at `output_path`.

    Fix: the obfuscated version repeated one placeholder for all four parameters
    (SyntaxError) and collapsed every local to a single name, so almost every
    later read (`device`, `pipeline`, `num_tokens`, `unet_path`, ...) was broken.
    """
    dtype = torch.float16 if fpaa else torch.float32
    if fpaa and torch.cuda.is_available():
        device = "cuda"
    elif fpaa and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights can exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part (same underlying module as vae_encoder)
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    # Smoke-test: the freshly written pipeline must load back.
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_path''',
        type=str,
        required=True,
        help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
    )
    parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--opset''',
        default=1_4,
        type=int,
        help='''The version of the ONNX operator set to use.''',
    )
    parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    # Fixes: the parser/namespace were bound to throwaway names while the code
    # read `parser` and `args` (NameError), and `--fp16` is stored by argparse
    # as `args.fp16`, not `args.fpaa` (AttributeError).
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 39
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int):
    """Decode an in-memory audio payload into a mono float32 waveform at
    `sampling_rate` by piping it through the `ffmpeg` binary.

    Fix: the obfuscated signature used the same placeholder for both parameters
    (a SyntaxError), and every local was bound to one throwaway name, leaving
    `ac`, `ar`, `format_for_conversion` and `output_stream` undefined.

    Raises:
        ValueError: if ffmpeg is not installed or the decoded stream is empty.
    """
    ar = f'''{sampling_rate}'''
    ac = "1"
    format_for_conversion = "f32le"  # 32-bit float little-endian PCM -> np.float32
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw byte chunks of `chunk_length_s` seconds captured from the
    default microphone through ffmpeg.

    Fix: duplicate placeholder parameter names (SyntaxError) and collapsed
    locals restored; the function is named `ffmpeg_microphone` as the live
    streaming helper below expects.
    """
    ar = f'''{sampling_rate}'''
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''')

    # Pick the platform-specific ffmpeg capture backend and device.
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as numpy arrays with left/right stride overlap,
    suitable for streaming ASR.

    Fix: the obfuscated signature repeated one placeholder for all five
    parameters (SyntaxError) and collapsed the locals, so `chunk_s`, `dtype`,
    `stride_left` / `stride_right` were undefined and the per-item
    `raw` / `stride` / `sampling_rate` fields were dropped instead of written.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''')

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample

    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into items of exactly `chunk_len` bytes with
    `stride = (stride_left, stride_right)` bytes of overlap carried between
    consecutive chunks.

    Yields dicts ``{"raw": bytes, "stride": (left, right)}``; when `stream` is
    True an extra ``"partial"`` flag marks chunks still waiting for more data.

    Fix: duplicate placeholder parameter names (SyntaxError) and collapsed
    locals restored (`acc`, `_stride_left`, `item` were all one name before).
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''')
    _stride_left = 0  # the very first chunk has no left context yet
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            # Not enough data for a full chunk yet: emit what we have as partial.
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region at the tail for the next chunk.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Launch `ffmpeg_command` and yield its stdout in reads of `buflen` bytes
    until EOF.

    Fix: duplicate placeholder parameter names (SyntaxError) restored to
    distinct names; the function is named `_ffmpeg_stream` as
    `ffmpeg_microphone` above expects.
    """
    bufsize = 2**24  # 16Mo pipe buffer so ffmpeg never blocks on a full pipe
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 39
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the timesformer model (standard transformers pattern).
# Fixes: the dict was bound to a throwaway name while `_LazyModule` below reads
# `_import_structure` (NameError); the torch branch bound a fresh list instead of
# extending the dict; and the lazy module was discarded instead of being
# installed as this module in sys.modules.
_import_structure = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_timesformer'''] = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 39
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __lowerCAmelCase ( args ):
    """Factory: build a DownloadCommand from a parsed argparse namespace.

    Fix: the parameter was named with the placeholder `lowerCamelCase` while the
    body reads `args`, so every call raised NameError.
    """
    # NOTE(review): `DownloadCommand` is presumably the (mangled) command class
    # defined below — confirm its real name before shipping.
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCAmelCase__ ( UpperCamelCase__ ):
    # CLI `download` sub-command: pre-fetches a model and its tokenizer into the
    # local cache. NOTE(review): this class is machine-obfuscated — `__init__`
    # repeats the placeholder `UpperCamelCase` four times (a SyntaxError:
    # duplicate argument names), and `register_subcommand` below both binds the
    # sub-parser to a throwaway name while reading `download_parser`, and reuses
    # `UpperCamelCase` as the `type=`, `default=` and `func=` values (upstream:
    # `str`, `None`, and the factory function). Restore before use.
    @staticmethod
    def UpperCAmelCase_ ( UpperCamelCase ) -> Tuple:
        # Registers the `download` sub-parser and its options on `UpperCamelCase`
        # (presumably the root argparse sub-parsers object — confirm).
        __lowerCAmelCase = parser.add_parser("download" )
        download_parser.add_argument(
            "--cache-dir" , type=UpperCamelCase , default=UpperCamelCase , help="Path to location to store the models" )
        download_parser.add_argument(
            "--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
        download_parser.add_argument(
            "--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
        download_parser.add_argument("model" , type=UpperCamelCase , help="Name of the model to download" )
        download_parser.set_defaults(func=UpperCamelCase )
    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
        # Stores the parsed CLI options (model id, cache dir, force flag,
        # trust-remote-code flag) for `run` below.
        __lowerCAmelCase = model
        __lowerCAmelCase = cache
        __lowerCAmelCase = force
        __lowerCAmelCase = trust_remote_code
    def UpperCAmelCase_ ( self ) -> Any:
        # Executes the download: instantiating AutoModel/AutoTokenizer with
        # force_download pulls the weights and tokenizer files into the cache.
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 39
| 1
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase : Optional[Any] = '''scheduler_config.json'''
class UpperCAmelCase__ ( UpperCamelCase__ ):
    # NOTE(review): every member below rebinds the same attribute name `a`, so
    # only the final assignment (14) survives on the class. Upstream this is a
    # fourteen-member Enum of scheduler identifiers (diffusers'
    # KarrasDiffusionSchedulers, presumably) — the member names must be restored
    # to distinct identifiers before this class is usable.
    a : str = 1
    a : Optional[int] = 2
    a : int = 3
    a : Union[str, Any] = 4
    a : int = 5
    a : Optional[int] = 6
    a : str = 7
    a : List[Any] = 8
    a : List[str] = 9
    a : List[str] = 1_0
    a : int = 1_1
    a : Any = 1_2
    a : Any = 1_3
    a : Tuple = 1_4
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
    # Scheduler step output container (base class is the mangled placeholder;
    # presumably BaseOutput, imported above — confirm).
    # `a` holds the previous-timestep sample tensor; shape not determinable here.
    a : torch.FloatTensor
class UpperCAmelCase__ :
    # Scheduler base mixin: config-backed save/load plus a query for compatible
    # scheduler classes. NOTE(review): this class is machine-obfuscated — the
    # class attributes all rebind `a` (only the last survives; upstream these
    # are `config_name`, `_compatibles`, `has_compatibles`), all four methods
    # share the name `UpperCAmelCase_` (later defs shadow earlier ones), and
    # the first two signatures repeat the placeholder `UpperCamelCase`, which
    # is a SyntaxError (duplicate argument names). Restore before use.
    a : Tuple = SCHEDULER_CONFIG_NAME
    a : Union[str, Any] = []
    a : str = True
    @classmethod
    def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int:
        # from_pretrained: load the config (path/subfolder/unused-kwargs args
        # are collapsed into one placeholder here) and build the scheduler.
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config(
            pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
        return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict:
        # save_pretrained: delegates to the config serializer.
        self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
    @property
    def UpperCAmelCase_ ( self ) -> str:
        # compatibles: classes that can be swapped in for this scheduler.
        return self._get_compatibles()
    @classmethod
    def UpperCAmelCase_ ( cls ) -> Tuple:
        # _get_compatibles: resolve the compatible class names against the
        # top-level package. NOTE(review): the getattr/hasattr calls below pass
        # the placeholder instead of (library_module, c) — upstream this is
        # `getattr(diffusers_library, c) ... if hasattr(diffusers_library, c)`.
        __lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
        __lowerCAmelCase = importlib.import_module(__name__.split("." )[0] )
        __lowerCAmelCase = [
            getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
        ]
        return compatible_classes
| 39
|
'''simple docstring'''
def count_divisors(n: int) -> int:
    """Return the number of divisors of `n` via its prime factorization:
    for n = p1^a1 * ... * pk^ak the count is (a1+1) * ... * (ak+1).

    Fix: the obfuscated parameter was named `lowerCamelCase` while the body
    reads `n` (NameError), and `n_divisors` / `i` / `multiplicity` were all
    bound to one throwaway name. The function is named `count_divisors` as
    `solution()` below expects.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # The remaining factor is a prime with exponent 1.
        n_divisors *= 2
    return n_divisors
def solution() -> int:
    """Project Euler #12: return the first triangular number with more than
    500 divisors.

    Fix: `t_num` and `i` were both bound to one throwaway name, so `i += 1`
    raised NameError; the function is named `solution` as the __main__ guard
    below expects.
    """
    t_num = 1  # current triangular number
    i = 1      # index of the current triangular number
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 5_00:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
| 39
| 1
|
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Root of the in-repo transformers sources; the checker script is meant to be
# run from the repository root. Restored bindings: these values were assigned
# to a throwaway name while the lines below (and the rest of the script) read
# PATH_TO_TRANSFORMERS / transformers / CONFIG_MAPPING.
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Config attributes used by modeling code in ways static string matching cannot
# detect, keyed by config class name. A value of True allowlists every
# attribute of that config; a list allowlists only the named attributes.
# Restored binding: the dict was assigned to a throwaway name while the
# `.update(...)` call and the checker functions read SPECIAL_CASES_TO_ALLOW.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}

# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return True if any variant in `attributes` is used (or allowlisted).

    Restored from mangled identifiers: the four parameters all shared one name
    (a SyntaxError) and the flags read below (`attribute_used`, `case_allowed`)
    were never bound. The restored name matches the call site in
    `check_config_attributes_being_used`.

    Args:
        config_class: configuration class owning the attribute.
        attributes: list of attribute-name variants for one config parameter.
        default_value: the parameter's default in the config `__init__`.
        source_strings: contents of the model's `modeling_*.py` files.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            # NOTE: `attribute` intentionally leaks from the loop above,
            # matching the upstream implementation.
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `config_class` init parameters that are never
    used by the model's `modeling_*.py` files.

    Restored from mangled identifiers: every local read below (`signature`,
    `parameter_names`, `reversed_attribute_map`, ...) had lost its binding.
    """
    # All init parameters except self/kwargs, with their default values.
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check every registered config class for unused attributes; raise
    ValueError listing the offenders.

    Restored from mangled identifiers: the accumulator dict, the per-module
    class list, and the lambda parameter had all lost their bindings.
    """
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),  # noqa: late-binding on purpose
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
# Entry point: run the unused-config-attribute check over the whole library.
# NOTE(review): `check_config_attributes` is not defined under that name in
# this file as written — the def above was machine-renamed; confirm binding.
if __name__ == "__main__":
    check_config_attributes()
| 39
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """DPR configuration: BERT-style encoder hyperparameters plus an optional
    projection dimension.

    Restored from mangled identifiers: every `__init__` parameter was named
    ``UpperCamelCase`` (duplicate argument names — a SyntaxError) while the
    body read the canonical attribute names below.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means the encoder output is used as-is.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 39
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_key, new_key) pairs mapping original DiT/BEiT checkpoint
    names to HuggingFace BEiT names.

    Restored from mangled identifiers: the three parameters shared one name
    (a SyntaxError); the restored name matches the call site below.

    Args:
        config: model config providing `num_hidden_layers`.
        has_lm_head: checkpoint has a masked-image-modeling head.
        is_semantic: checkpoint keys carry a "backbone." prefix.
    """
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate HF query/key/value
    entries, in place, and move the layer-scale gammas.

    Restored from mangled identifiers: the original `state_dict[...] = ...`
    write targets were lost; the HF key names below follow the upstream
    BEiT/DiT conversion script — confirm against that script if in doubt.
    `has_lm_head` is accepted for signature compatibility but unused here.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values (fused in the original checkpoint)
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_b = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_b
def rename_key(dct, old, new):
    """Move the value stored under `old` to key `new`, in place.

    Restored from mangled identifiers: the three parameters shared one name
    (a SyntaxError); the restored name matches the call site below.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download and return the standard COCO cats test image (network I/O).

    Restored name: called as `prepare_img` in the conversion function below.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original DiT checkpoint to a HuggingFace BEiT model, verify
    the output logits shape, and save (optionally push) the result.

    Restored from mangled identifiers: duplicate parameter names (SyntaxError)
    and lost local bindings were reconstructed from the upstream conversion
    script — confirm details (e.g. BeitConfig keyword values) against it.
    """
    # rvlcdip checkpoints are classification models; the rest have an LM head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
# CLI entry point: parse checkpoint URL / output dir / push flag and run the
# conversion.
# NOTE(review): the parser and parse result are bound to mangled names
# (`lowerCAmelCase`) but read as `parser` / `args` below — these bindings do
# not line up as written; confirm the intended variable names.
if __name__ == "__main__":
    lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
        type=str,
        help='''URL to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
    )
    lowerCAmelCase : int = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 39
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> public names it provides.
# Restored from mangled identifiers: the dict and the per-backend name lists
# were bound to throwaway names, so `_LazyModule` below received an undefined
# `_import_structure`, the optional-backend lists were dropped, and the lazy
# module was never installed into `sys.modules`.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

# Type checkers see the real imports; at runtime the module is replaced by a
# _LazyModule that imports submodules on first attribute access.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39
| 1
|
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCAmelCase : Tuple = TypeVar('''KT''')
lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class Node(Generic[KT, VT]):
    """A skip-list node: a key/value pair plus one forward link per level.

    Restored from mangled identifiers: ``__init__`` bound key/value/forward to
    throwaway locals instead of attributes, and the class/property names did
    not match their call sites (``Node`` / ``.level`` in SkipList).
    """

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i; the node's level is the
        # number of forward links it carries.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node participates in."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    """Probabilistic sorted map: expected O(log n) insert/find/delete.

    Restored from mangled identifiers: ``__init__`` never set
    ``head``/``level``/``p``/``max_level``, every method shared one name, and
    most local rebindings were lost; reconstructed to match the intact reads
    and the test functions below (`insert`/`find`/`delete`).
    """

    def __init__(self, p: float = 0.5, max_level: int = 16):
        # Sentinel head node; never holds user data.
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        # Probability of promoting a new node one more level.
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(node.forward))
        lines.append(" " * label_size + "| " * len(node.forward))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(node.forward))

            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(node.forward))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        # Walk level 0 — yields keys in ascending order.
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level in [1, max_level] with geometric distribution p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return (node with `key` or None, per-level predecessors of `key`)."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        """Remove `key` (and its per-level links) if present; no-op otherwise."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """Insert `key` -> `value`, overwriting the value if `key` exists."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        """Return the value stored under `key`, or None when absent."""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    """All inserted key/value pairs are reachable by walking level 0."""
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    """Re-inserting an existing key overwrites its value, not adds a node."""
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    """find() on an empty list yields None rather than raising."""
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def test_search():
    """find() returns the latest value per key and None for absent keys."""
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    """delete() on an empty list is a silent no-op."""
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    """Deleted keys are no longer reachable via find()."""
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    """Each delete removes exactly the requested key, leaving the rest intact."""
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    """After a delete, no forward link anywhere still points at the removed node."""
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        # Yield this node's key plus every key reachable through any level.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    # head + the three surviving keys
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    """Iteration yields keys in ascending order across inserts and deletes."""

    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def __lowerCAmelCase ( ):
    '''simple docstring'''
    # Run the whole suite repeatedly: a skip list is probabilistic (random
    # node heights), so a single pass can miss randomized bugs.
    # NOTE(review): none of the test_* names below are defined under these
    # names anywhere in this (renamed) module — confirm against the
    # original upstream file before running.
    for _ in range(1_00 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
    """Small demo: insert a few entries (including a duplicate key), delete
    one key, then print the resulting skip-list structure."""
    # BUG FIX: the list was assigned to a throwaway name and an undefined
    # name was printed; both restored to `skip_list`.
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
    # Run the module's doctests, then the demo entry point.
    # NOTE(review): `main` is not defined under that name in this renamed
    # module (every function was renamed) — confirm against the upstream
    # script.
    import doctest

    doctest.testmod()
    main()
| 39
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( dataset , expected_features ):
    """Shared assertions for a Dataset read from the 4-row JSON fixture.

    Validates the instance type, row/column counts, column order, and
    per-column dtypes against ``expected_features`` (name -> dtype string).
    """
    # BUG FIX: the original asserted isinstance(x, x) and read undefined
    # names; parameters restored so the helper validates its inputs.
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def __lowerCAmelCase ( keep_in_memory , jsonl_path , tmp_path ):
    """Reading a JSONL file yields the expected Dataset with and without
    keep_in_memory (pytest injects jsonl_path/tmp_path fixtures by name)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # keep_in_memory=True must grow Arrow memory; False must not.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def __lowerCAmelCase ( features , jsonl_path , tmp_path ):
    """An explicit Features spec (or None for inferred types) is honored."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    # Build a datasets.Features object from the dtype mapping, if given.
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def __lowerCAmelCase ( features , jsonl_312_path , tmp_path ):
    """Column order declared in the Features spec is preserved on read."""
    # NOTE(review): fixture name `jsonl_312_path` is reconstructed from the
    # 2-row / col_3-first expectations below — confirm against conftest.
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def __lowerCAmelCase ( jsonl_312_path , tmp_path ):
    """A Features spec whose column order differs from the file's order is
    applied: the dataset follows the declared order."""
    # NOTE(review): fixture name `jsonl_312_path` reconstructed — confirm
    # against conftest.
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def __lowerCAmelCase ( split , jsonl_path , tmp_path ):
    """The requested split name is attached to the dataset (default: train)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def __lowerCAmelCase ( path_type , jsonl_path , tmp_path ):
    """The reader accepts either a single path or a list of paths."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def __lowerCAmelCase ( dataset_dict , expected_features , splits=("train",) ):
    """Shared assertions for a DatasetDict read from the 4-row JSON fixture:
    each requested split has the expected rows, columns, and dtypes."""
    # BUG FIX: the original asserted isinstance(x, x) and read undefined
    # names; parameters restored so the helper validates its inputs.
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def __lowerCAmelCase ( keep_in_memory , jsonl_path , tmp_path ):
    """Reading a {split: path} mapping yields the expected DatasetDict with
    and without keep_in_memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # keep_in_memory=True must grow Arrow memory; False must not.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def __lowerCAmelCase ( features , jsonl_path , tmp_path ):
    """An explicit Features spec (or None) is honored for DatasetDict reads."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def __lowerCAmelCase ( split , jsonl_path , tmp_path ):
    """Reading a dict of paths produces a DatasetDict keyed by split name;
    with no split requested, both default splits are read."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ):
    """Deserialize one whole JSON document from an open file-like object."""
    document = json.load(lowerCamelCase )
    return document
def __lowerCAmelCase ( lowerCamelCase : Tuple ):
    """Deserialize a JSON-Lines buffer: one JSON document per line,
    returned as a list of parsed objects."""
    # BUG FIX: the original iterated an undefined name (`buffer`) and passed
    # the whole buffer object to json.loads on every iteration instead of
    # parsing each individual line.
    return [json.loads(line) for line in lowerCamelCase]
class UpperCAmelCase__ :
    """Round-trip tests: write a Dataset to JSON / JSON Lines (optionally with
    multiprocessing or compression) and reload it to validate the output.

    NOTE(review): source mangling collapsed every method name to
    ``UpperCAmelCase_`` (so later defs shadow earlier ones) and collapsed
    each parameter list to repeated ``UpperCamelCase`` names — duplicate
    parameter names are a SyntaxError in Python. It also references
    module-level ``load_json``/``load_json_lines`` helpers that no longer
    exist under those names here. Code is kept byte-identical; restore
    names from the upstream test file before running.
    """

    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
        # Write with/without lines= to an in-memory buffer, reload with the
        # matching loader, and check the row count survives the round trip.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write()
            buffer.seek(0 )
            __lowerCAmelCase = load_json_function(UpperCamelCase )
        assert isinstance(UpperCamelCase , UpperCamelCase )
        assert isinstance(exported_content[0] , UpperCamelCase )
        assert len(UpperCamelCase ) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
        # Each pandas `orient` value yields a specific container shape/keys.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write()
            buffer.seek(0 )
            __lowerCAmelCase = load_json(UpperCamelCase )
        assert isinstance(UpperCamelCase , UpperCamelCase )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase ) == 10

    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
        # Same round trip as above, but written with num_proc=2 workers.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write()
            buffer.seek(0 )
            __lowerCAmelCase = load_json_function(UpperCamelCase )
        assert isinstance(UpperCamelCase , UpperCamelCase )
        assert isinstance(exported_content[0] , UpperCamelCase )
        assert len(UpperCamelCase ) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at" , [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] , )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
        # Orient matrix again, with multiprocessing enabled.
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write()
            buffer.seek(0 )
            __lowerCAmelCase = load_json(UpperCamelCase )
        assert isinstance(UpperCamelCase , UpperCamelCase )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase ) == 10

    def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
        # num_proc=0 is invalid and must raise.
        with pytest.raises(UpperCamelCase ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 )

    @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
        # Write compressed JSON and compare bytes with a reference file,
        # decompressing both via fsspec's compression="infer".
        __lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
        __lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write()
        with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
            __lowerCAmelCase = f.read()
        with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
            __lowerCAmelCase = f.read()
        assert exported_content == original_content
| 39
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public names it exports.
# BUG FIX: this dict was assigned to a throwaway name, so the later
# references to `_import_structure` raised NameError.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only exports are registered only when torch is installed.
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # BUG FIX: the lazy module must replace this module in sys.modules;
    # the original bound it to an unused local instead.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public names it exports.
# BUG FIX: this dict was assigned to a throwaway name, so the later
# references to `_import_structure` raised NameError.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only exports are registered only when torch is installed.
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # BUG FIX: the lazy module must replace this module in sys.modules;
    # the original bound it to an unused local instead.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39
| 1
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __lowerCAmelCase ( lowerCamelCase : Union[dict, list, tuple, torch.Tensor] ):
    """Return the ``.shape`` of every tensor found in a nested
    dict/list/tuple tree, in traversal order.

    Raises ValueError for any leaf that is not a tensor or supported
    container.
    """
    # BUG FIX: the accumulator was assigned to a throwaway name and the
    # original recursed via a module-level name that no longer exists;
    # recursion now goes through a named inner helper.
    shapes = []

    def _collect(tree) -> None:
        if isinstance(tree, dict):
            for v in tree.values():
                _collect(v)
        elif isinstance(tree, (list, tuple)):
            for t in tree:
                _collect(t)
        elif isinstance(tree, torch.Tensor):
            shapes.append(tree.shape)
        else:
            raise ValueError("Not supported")

    _collect(lowerCamelCase)
    return shapes
@torch.jit.ignore
def __lowerCAmelCase ( flat_idx : int , dims : Tuple[int, ...] ):
    """Convert a flat (row-major) index into a multi-dimensional index tuple
    for a tensor whose batch dimensions are ``dims``."""
    # BUG FIX: both parameters were mangled to the same name (a SyntaxError
    # in Python) and the accumulator was unassigned; restored to the
    # standard flat-index -> index-tuple decomposition.
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def __lowerCAmelCase ( lowerCamelCase : Sequence[int] , lowerCamelCase : Sequence[int] , lowerCamelCase : Sequence[int] , lowerCamelCase : Optional[Sequence[bool]] = None , lowerCamelCase : Optional[Sequence[bool]] = None , ):
    # NOTE(review): mangled — all five parameters share one name (a Python
    # SyntaxError) and every local was collapsed to `__lowerCAmelCase`, so
    # later reads (`start`, `end`, `dims`, `slices`, `path_list`, ...) are
    # unresolved. Upstream this is OpenFold's
    # `_get_minimal_slice_set(start, end, dims, start_edges, end_edges)`,
    # which computes the minimal set of slice tuples covering the index
    # range [start, end] (inclusive). Code left byte-identical; restore
    # names from the upstream file before use.
    '''simple docstring'''
    def reduce_edge_list(lowerCamelCase : List[bool] ) -> None:
        # Propagate "is at the tensor edge" flags from the innermost
        # dimension outward (in-place on the list).
        __lowerCAmelCase = True
        for i in range(len(lowerCamelCase ) ):
            __lowerCAmelCase = -1 * (i + 1)
            l[reversed_idx] &= tally
            __lowerCAmelCase = l[reversed_idx]

    if start_edges is None:
        __lowerCAmelCase = [s == 0 for s in start]
        reduce_edge_list(lowerCamelCase )
    if end_edges is None:
        __lowerCAmelCase = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
        reduce_edge_list(lowerCamelCase )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(lowerCamelCase ) == 0:
        return [()]
    elif len(lowerCamelCase ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    __lowerCAmelCase = []
    __lowerCAmelCase = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(lowerCamelCase , lowerCamelCase ):
        if s == e:
            path_list.append(slice(lowerCamelCase , s + 1 ) )
        else:
            break
    __lowerCAmelCase = tuple(lowerCamelCase )
    __lowerCAmelCase = len(lowerCamelCase )
    # start == end, and we're done
    if divergence_idx == len(lowerCamelCase ):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        # Slices covering the sub-range below the divergence point.
        assert start_edges is not None
        assert end_edges is not None
        __lowerCAmelCase = start[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        # Slices covering the sub-range above the divergence point.
        assert start_edges is not None
        assert end_edges is not None
        __lowerCAmelCase = end[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
        slices.extend(lower() )
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper() )
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper() )
        __lowerCAmelCase = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
        slices.extend(lower() )
    return slices
@torch.jit.ignore
def __lowerCAmelCase ( lowerCamelCase : torch.Tensor , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ):
    # NOTE(review): mangled — all four parameters share one name (a Python
    # SyntaxError) and the body reads undefined names. Upstream this is
    # OpenFold's `_chunk_slice(t, flat_start, flat_end, no_batch_dims)`:
    # it extracts rows [flat_start, flat_end) of the flattened batch
    # without materializing the full flattened tensor. Code left
    # byte-identical; restore names from the upstream file before use.
    '''simple docstring'''
    __lowerCAmelCase = t.shape[:no_batch_dims]
    __lowerCAmelCase = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) )
    # _get_minimal_slice_set is inclusive
    __lowerCAmelCase = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) )
    # Get an ordered list of slices to perform
    __lowerCAmelCase = _get_minimal_slice_set(
        lowerCamelCase , lowerCamelCase , lowerCamelCase , )
    __lowerCAmelCase = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def __lowerCAmelCase ( lowerCamelCase : Callable , lowerCamelCase : Dict[str, Any] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : bool = False , lowerCamelCase : Any = None , lowerCamelCase : bool = False , ):
    # NOTE(review): mangled — all seven parameters share one name (a Python
    # SyntaxError) and every local was collapsed, so later reads are
    # unresolved. Upstream this is OpenFold's
    # `chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem,
    # _out, _add_into_out)`, which applies `layer` over the flattened batch
    # in chunks of `chunk_size` and reassembles the outputs. Code left
    # byte-identical; restore names from the upstream file before use.
    '''simple docstring'''
    if not (len(lowerCamelCase ) > 0):
        raise ValueError("Must provide at least one input" )
    __lowerCAmelCase = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
    __lowerCAmelCase = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )

    def _prep_inputs(lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        # Broadcast each input to the full batch shape; additionally flatten
        # the batch dims unless running in low-memory mode.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                __lowerCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            __lowerCAmelCase = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            __lowerCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t

    __lowerCAmelCase = tensor_tree_map(_prep_inputs , lowerCamelCase )
    __lowerCAmelCase = None
    if _out is not None:
        __lowerCAmelCase = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    __lowerCAmelCase = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    __lowerCAmelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        # Slice out the current chunk; size-1 leading dims broadcast instead.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    __lowerCAmelCase = 0
    __lowerCAmelCase = prepped_outputs
    for _ in range(lowerCamelCase ):
        # Chunk the input
        if not low_mem:
            __lowerCAmelCase = _select_chunk
        else:
            __lowerCAmelCase = partial(
                _chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
        __lowerCAmelCase = tensor_tree_map(lowerCamelCase , lowerCamelCase )
        # Run the layer on the chunk
        __lowerCAmelCase = layer(**lowerCamelCase )
        # Allocate space for the output
        if out is None:
            __lowerCAmelCase = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
        # Put the chunk in its pre-allocated space
        if isinstance(lowerCamelCase , lowerCamelCase ):

            def assign(lowerCamelCase : dict , lowerCamelCase : dict ) -> None:
                # Recursively copy (or accumulate) dict outputs in place.
                for k, v in da.items():
                    if isinstance(lowerCamelCase , lowerCamelCase ):
                        assign(lowerCamelCase , da[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += da[k]
                        else:
                            __lowerCAmelCase = da[k]

            assign(lowerCamelCase , lowerCamelCase )
        elif isinstance(lowerCamelCase , lowerCamelCase ):
            for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xa
                else:
                    __lowerCAmelCase = xa
        elif isinstance(lowerCamelCase , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                __lowerCAmelCase = output_chunk
        else:
            raise ValueError("Not supported" )
        i += chunk_size
    __lowerCAmelCase = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
    return out
class UpperCAmelCase__ :
    """Dynamically tunes the largest chunk size that evaluates without an
    out-of-memory RuntimeError, caching the result per argument signature.

    NOTE(review): locals were mangled to ``__lowerCAmelCase``, so later
    reads of names like ``candidates``, ``viable``, ``consistent`` and
    attributes like ``self.max_chunk_size``/``self.cached_arg_data`` are
    unresolved. Code kept byte-identical; restore names from the upstream
    OpenFold ``ChunkSizeTuner``.
    """

    def __init__( self , UpperCamelCase = 512 , ) -> Any:
        # Upper bound for the chunk-size search (default 512).
        __lowerCAmelCase = max_chunk_size
        __lowerCAmelCase = None
        __lowerCAmelCase = None

    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
        # Binary-search over power-of-two candidates for the largest chunk
        # size at which `fn(*args, chunk_size=c)` still succeeds.
        logging.info("Tuning chunk size..." )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        __lowerCAmelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        __lowerCAmelCase = [c for c in candidates if c > min_chunk_size]
        __lowerCAmelCase = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(UpperCamelCase ) -> bool:
            # A candidate is viable iff the call completes without raising.
            try:
                with torch.no_grad():
                    fn(*UpperCamelCase , chunk_size=UpperCamelCase )
                return True
            except RuntimeError:
                return False

        __lowerCAmelCase = 0
        __lowerCAmelCase = len(UpperCamelCase ) - 1
        while i > min_viable_chunk_size_index:
            __lowerCAmelCase = test_chunk_size(candidates[i] )
            if not viable:
                __lowerCAmelCase = (min_viable_chunk_size_index + i) // 2
            else:
                __lowerCAmelCase = i
                __lowerCAmelCase = (i + len(UpperCamelCase ) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool:
        # Deep-compare two cached argument descriptions (lists/tuples/dicts
        # compared structurally, everything else by equality).
        __lowerCAmelCase = True
        for aa, aa in zip(UpperCamelCase , UpperCamelCase ):
            assert type(UpperCamelCase ) == type(UpperCamelCase )
            if isinstance(UpperCamelCase , (list, tuple) ):
                consistent &= self._compare_arg_caches(UpperCamelCase , UpperCamelCase )
            elif isinstance(UpperCamelCase , UpperCamelCase ):
                __lowerCAmelCase = [v for _, v in sorted(aa.items() , key=lambda UpperCamelCase : x[0] )]
                __lowerCAmelCase = [v for _, v in sorted(aa.items() , key=lambda UpperCamelCase : x[0] )]
                consistent &= self._compare_arg_caches(UpperCamelCase , UpperCamelCase )
            else:
                consistent &= aa == aa
        return consistent

    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> int:
        # Retune only when the argument shapes/values changed since the
        # previous call; otherwise return the cached chunk size.
        __lowerCAmelCase = True
        __lowerCAmelCase = tree_map(lambda UpperCamelCase : a.shape if isinstance(UpperCamelCase , torch.Tensor ) else a , UpperCamelCase , UpperCamelCase )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(UpperCamelCase )
            __lowerCAmelCase = self._compare_arg_caches(self.cached_arg_data , UpperCamelCase )
        else:
            # Otherwise, we can reuse the precomputed value
            __lowerCAmelCase = False
        if not consistent:
            __lowerCAmelCase = self._determine_favorable_chunk_size(
                UpperCamelCase , UpperCamelCase , UpperCamelCase , )
            __lowerCAmelCase = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 39
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """Tests for the CMStochasticIterativeScheduler (consistency models).

    NOTE(review): mangling replaced the base class (upstream:
    SchedulerCommonTest) with the undefined name ``UpperCamelCase__`` and
    collapsed every local to ``__lowerCAmelCase``, so later references to
    ``config``, ``scheduler``, ``model``, ``sample``, etc. are unresolved.
    Code kept byte-identical; restore from the upstream test file.
    """

    a : List[str] = (CMStochasticIterativeScheduler,)
    a : str = 1_0

    def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str:
        # Base scheduler config; keyword overrides are merged in.
        __lowerCAmelCase = {
            "num_train_timesteps": 201,
            "sigma_min": 0.0_02,
            "sigma_max": 80.0,
        }
        config.update(**UpperCamelCase )
        return config

    def UpperCAmelCase_ ( self ) -> List[Any]:
        # Two consecutive steps keep the sample shape unchanged.
        __lowerCAmelCase = 10
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase )
        scheduler.set_timesteps(UpperCamelCase )
        __lowerCAmelCase = scheduler.timesteps[0]
        __lowerCAmelCase = scheduler.timesteps[1]
        __lowerCAmelCase = self.dummy_sample
        __lowerCAmelCase = 0.1 * sample
        __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
        __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def UpperCAmelCase_ ( self ) -> Any:
        # Config sweep over num_train_timesteps.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase )

    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # Config sweep over clip_denoised.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=UpperCamelCase )

    def UpperCAmelCase_ ( self ) -> List[str]:
        # Full single-step denoising loop; checks sum/mean of the result
        # against reference values.
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = 1
        scheduler.set_timesteps(UpperCamelCase )
        __lowerCAmelCase = scheduler.timesteps
        __lowerCAmelCase = torch.manual_seed(0 )
        __lowerCAmelCase = self.dummy_model()
        __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(UpperCamelCase ):
            # 1. scale model input
            __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
            # 2. predict noise residual
            __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
            # 3. predict previous sample x_t-1
            __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
            __lowerCAmelCase = pred_prev_sample
        __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
        __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
        assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
        assert abs(result_mean.item() - 0.25_10 ) < 1E-3

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # Same loop driven by an explicit custom timestep schedule.
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = [106, 0]
        scheduler.set_timesteps(timesteps=UpperCamelCase )
        __lowerCAmelCase = scheduler.timesteps
        __lowerCAmelCase = torch.manual_seed(0 )
        __lowerCAmelCase = self.dummy_model()
        __lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            __lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
            # 2. predict noise residual
            __lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
            # 3. predict previous sample x_t-1
            __lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
            __lowerCAmelCase = pred_prev_sample
        __lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
        __lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
        assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
        assert abs(result_mean.item() - 0.45_27 ) < 1E-3

    def UpperCAmelCase_ ( self ) -> Any:
        # Non-descending custom timesteps must raise.
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = [39, 30, 12, 15, 0]
        with self.assertRaises(UpperCamelCase , msg="`timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=UpperCamelCase )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # Passing both num_inference_steps and timesteps must raise.
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = [39, 30, 12, 1, 0]
        __lowerCAmelCase = len(UpperCamelCase )
        with self.assertRaises(UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase )

    def UpperCAmelCase_ ( self ) -> Tuple:
        # A timestep >= num_train_timesteps must raise.
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config()
        __lowerCAmelCase = scheduler_class(**UpperCamelCase )
        __lowerCAmelCase = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            UpperCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
            scheduler.set_timesteps(timesteps=UpperCamelCase )
| 39
| 1
|
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__ ( UpperCamelCase__ ):
# to overwrite at feature extractactor specific tests
a : int = None
a : Any = None
    @property
    def UpperCAmelCase_ ( self ) -> Tuple:
        # Feature-extractor constructor kwargs produced by the companion
        # tester object (set by the concrete test subclass).
        return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase , "feature_size" ) )
self.assertTrue(hasattr(UpperCamelCase , "sampling_rate" ) )
self.assertTrue(hasattr(UpperCamelCase , "padding_value" ) )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase = feat_extract.model_input_names[0]
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(UpperCamelCase ) == len(UpperCamelCase ) for x, y in zip(UpperCamelCase , processed_features[input_name] ) ) )
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase )
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
__lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase )
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase = feat_extract.model_input_names[0]
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
__lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase )
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase = feat_extract.model_input_names[0]
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
__lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Any:
def _inputs_have_equal_length(UpperCamelCase ):
__lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(UpperCamelCase ) != length:
return False
return True
def _inputs_are_equal(UpperCamelCase , UpperCamelCase ):
if len(UpperCamelCase ) != len(UpperCamelCase ):
return False
for input_slice_a, input_slice_a in zip(UpperCamelCase , UpperCamelCase ):
if not np.allclose(np.asarray(UpperCamelCase ) , np.asarray(UpperCamelCase ) , atol=1E-3 ):
return False
return True
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase )
__lowerCAmelCase = feat_extract.model_input_names[0]
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase = self.feat_extract_tester.seq_length_diff
__lowerCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff
__lowerCAmelCase = self.feat_extract_tester.min_seq_length
__lowerCAmelCase = self.feat_extract_tester.batch_size
__lowerCAmelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding=UpperCamelCase )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="longest" )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[-1] ) )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="longest" , return_tensors="np" )
__lowerCAmelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , padding="max_length" )[input_name]
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=UpperCamelCase , return_tensors="np" )
__lowerCAmelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_are_equal(UpperCamelCase , UpperCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , pad_to_multiple_of=10 )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="longest" , pad_to_multiple_of=10 )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=UpperCamelCase )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=UpperCamelCase , return_tensors="np" , )
__lowerCAmelCase = input_a[input_name]
self.assertTrue(all(len(UpperCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(UpperCamelCase , UpperCamelCase ) )
__lowerCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(UpperCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__lowerCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> List[Any]:
def _inputs_have_equal_length(UpperCamelCase ):
__lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(UpperCamelCase ) != length:
return False
return True
def _inputs_are_equal(UpperCamelCase , UpperCamelCase ):
if len(UpperCamelCase ) != len(UpperCamelCase ):
return False
for input_slice_a, input_slice_a in zip(UpperCamelCase , UpperCamelCase ):
if not np.allclose(np.asarray(UpperCamelCase ) , np.asarray(UpperCamelCase ) , atol=1E-3 ):
return False
return True
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase )
__lowerCAmelCase = feat_extract.model_input_names[0]
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=UpperCamelCase )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) )
__lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
# truncate to smallest with np
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=UpperCamelCase , )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
__lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
# truncate to middle
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=UpperCamelCase , return_tensors="np" , )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=UpperCamelCase )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
__lowerCAmelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_are_equal(UpperCamelCase , UpperCamelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , truncation=UpperCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , padding="longest" , truncation=UpperCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , padding="longest" , truncation=UpperCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , padding="max_length" , truncation=UpperCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase = 12
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCamelCase , truncation=UpperCamelCase , )
__lowerCAmelCase = input_a[input_name]
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCamelCase , )
__lowerCAmelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowerCAmelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__lowerCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
def UpperCAmelCase_ ( self ) -> Dict:
self._check_padding(numpify=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
self._check_padding(numpify=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
self._check_truncation(numpify=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
self._check_truncation(numpify=UpperCamelCase )
@require_torch
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase = feat_extract.model_input_names[0]
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="longest" , return_tensors="np" )[input_name]
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase = feat_extract.model_input_names[0]
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="longest" , return_tensors="np" )[input_name]
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="longest" , return_tensors="tf" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.feat_extract_dict
__lowerCAmelCase = True
__lowerCAmelCase = self.feature_extraction_class(**UpperCamelCase )
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase = [len(UpperCamelCase ) for x in speech_inputs]
__lowerCAmelCase = feat_extract.model_input_names[0]
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase = feat_extract.pad(UpperCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.feat_extract_dict
__lowerCAmelCase = True
__lowerCAmelCase = self.feature_extraction_class(**UpperCamelCase )
__lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase = [len(UpperCamelCase ) for x in speech_inputs]
__lowerCAmelCase = feat_extract.model_input_names[0]
__lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase = min(UpperCamelCase )
__lowerCAmelCase = feat_extract.pad(
UpperCamelCase , padding="max_length" , max_length=UpperCamelCase , truncation=UpperCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 39
|
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldometers and return a {statistic title: value} mapping for COVID-19.

    The function name now matches the ``world_covidaa_stats()`` call in the
    ``__main__`` block below; the obfuscated ``def`` name previously made that
    call a NameError.

    Args:
        url: page to scrape (defaults to the worldometers coronavirus page).

    Returns:
        dict mapping each counter title to its displayed value, both stripped.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Headline counters: <h1> titles paired with "maincounter-number" divs.
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    # Secondary panels: "panel-title" spans paired with "number-table-main" divs.
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    # zip() silently drops any unpaired trailing entries.
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    # Print a bold header, then one "title\nvalue" pair per scraped statistic.
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covidaa_stats().items():
        print(f'{key}\n{value}\n')
| 39
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase : Tuple = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    """Round (height, width) up to the nearest multiple of ``scale_factor**2``,
    then divide by ``scale_factor`` — yielding the latent spatial size used by
    the pipeline below.

    Fixes in this revision: the def name now matches the
    ``downscale_height_and_width(...)`` call inside the pipeline's ``__call__``;
    the duplicate obfuscated parameter names (a SyntaxError) and the unbound
    ``new_height`` / ``new_width`` locals are restored.

    Args:
        height: pixel height of the requested image.
        width: pixel width of the requested image.
        scale_factor: spatial downscale factor of the movq encoder.

    Returns:
        tuple (new_height, new_width) in latent units, each a multiple of 1
        such that new_* * scale_factor covers the requested size.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # round up so the latent grid covers the full requested height
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """Kandinsky-2.2-style decoder pipeline: image embeddings -> images via unet + scheduler + movq.

    NOTE(review): this block appears machine-mangled — local assignment targets
    were collapsed to ``__lowerCAmelCase`` and parameters to ``UpperCamelCase``
    while later lines still read the original names (``latents``,
    ``do_classifier_free_guidance``, ``batch_size`` ...), so the methods raise
    NameError as written.  Comments describe apparent intent only; confirm
    against the upstream diffusers pipeline.
    """

    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Union[str, Any]:
        # Register the sub-modules (unet, scheduler, movq) with the base
        # DiffusionPipeline so device moves and serialization see them.
        super().__init__()
        self.register_modules(
            unet=UpperCamelCase , scheduler=UpperCamelCase , movq=UpperCamelCase , )
        # Spatial downscale factor implied by the movq encoder depth
        # (presumably bound to self.movq_scale_factor originally).
        __lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
        # Draw fresh noise when no latents are supplied; otherwise validate the
        # provided tensor's shape and move it to the target device.
        if latents is None:
            __lowerCAmelCase = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            __lowerCAmelCase = latents.to(UpperCamelCase )
        # Scale by the scheduler's initial noise sigma.
        __lowerCAmelCase = latents * scheduler.init_noise_sigma
        return latents

    def UpperCAmelCase_ ( self , UpperCamelCase=0 ) -> Any:
        # Sequential CPU offload: each sub-module is moved to the GPU only
        # while it runs, via accelerate's cpu_offload.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        __lowerCAmelCase = torch.device(F'''cuda:{gpu_id}''' )
        __lowerCAmelCase = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCamelCase , UpperCamelCase )

    def UpperCAmelCase_ ( self , UpperCamelCase=0 ) -> Union[str, Any]:
        # Model CPU offload: whole sub-modules hop to the GPU on demand with
        # hooks (requires accelerate >= 0.17.0).
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        __lowerCAmelCase = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=UpperCamelCase )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        __lowerCAmelCase = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            __lowerCAmelCase , __lowerCAmelCase = cpu_offload_with_hook(UpperCamelCase , UpperCamelCase , prev_module_hook=UpperCamelCase )
        # We'll offload the last model manually.
        __lowerCAmelCase = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # Resolve the device the unet actually executes on, honouring
        # accelerate hooks when offloading is enabled.
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCamelCase , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(UpperCamelCase )
    def __call__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 100 , UpperCamelCase = 4.0 , UpperCamelCase = 1 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , ) -> int:
        # Full denoising loop: prepare (optionally CFG-doubled) image
        # embeddings, iterate scheduler timesteps through the unet, then decode
        # latents with movq and post-process to the requested output type.
        __lowerCAmelCase = self._execution_device
        __lowerCAmelCase = guidance_scale > 1.0
        if isinstance(UpperCamelCase , UpperCamelCase ):
            __lowerCAmelCase = torch.cat(UpperCamelCase , dim=0 )
        __lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(UpperCamelCase , UpperCamelCase ):
            __lowerCAmelCase = torch.cat(UpperCamelCase , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeddings per requested image and stack
            # [negative, positive] for a single batched unet pass.
            __lowerCAmelCase = image_embeds.repeat_interleave(UpperCamelCase , dim=0 )
            __lowerCAmelCase = negative_image_embeds.repeat_interleave(UpperCamelCase , dim=0 )
            __lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase )
        self.scheduler.set_timesteps(UpperCamelCase , device=UpperCamelCase )
        __lowerCAmelCase = self.scheduler.timesteps
        __lowerCAmelCase = self.unet.config.in_channels
        __lowerCAmelCase , __lowerCAmelCase = downscale_height_and_width(UpperCamelCase , UpperCamelCase , self.movq_scale_factor )
        # create initial latent
        __lowerCAmelCase = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase , UpperCamelCase , UpperCamelCase , self.scheduler , )
        for i, t in enumerate(self.progress_bar(UpperCamelCase ) ):
            # expand the latents if we are doing classifier free guidance
            __lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __lowerCAmelCase = {"image_embeds": image_embeds}
            __lowerCAmelCase = self.unet(
                sample=UpperCamelCase , timestep=UpperCamelCase , encoder_hidden_states=UpperCamelCase , added_cond_kwargs=UpperCamelCase , return_dict=UpperCamelCase , )[0]
            if do_classifier_free_guidance:
                # Split unconditional/conditional halves and blend them with
                # the guidance scale; keep the learned variance channels.
                __lowerCAmelCase , __lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
                __lowerCAmelCase , __lowerCAmelCase = noise_pred.chunk(2 )
                __lowerCAmelCase , __lowerCAmelCase = variance_pred.chunk(2 )
                __lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                __lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler doesn't expect variance channels — drop them.
                __lowerCAmelCase , __lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            __lowerCAmelCase = self.scheduler.step(
                UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase , )[0]
        # post-processing
        __lowerCAmelCase = self.movq.decode(UpperCamelCase , force_not_quantize=UpperCamelCase )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # map from [-1, 1] to [0, 1] and move to HWC numpy
            __lowerCAmelCase = image * 0.5 + 0.5
            __lowerCAmelCase = image.clamp(0 , 1 )
            __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            __lowerCAmelCase = self.numpy_to_pil(UpperCamelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCamelCase )
| 39
|
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly; base case of the Strassen recursion.

    The def name now matches the ``default_matrix_multiplication`` call in
    ``actual_strassen``; the duplicate obfuscated parameter names (a
    SyntaxError) are restored to ``a`` and ``b``, which the body already uses.

    Raises:
        Exception: if either matrix is not 2x2.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally-shaped matrices (list of rows).

    Restores the def name used by the call sites in ``actual_strassen`` and
    the parameter names the body already references.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference of two equally-shaped matrices (list of rows).

    Restores the def name used by the call sites in ``actual_strassen`` and
    the parameter names the body already references.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple:
    """Split an even-sized square matrix into its four quadrants.

    Restores the def name used by ``actual_strassen`` and the obfuscated
    range bounds (mid / matrix_length), which the comprehension shapes in the
    original uniquely determine.

    Returns:
        (top_left, top_right, bot_left, bot_right)

    Raises:
        Exception: if the matrix has an odd dimension.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple:
    """Return (rows, cols) of a non-empty matrix; name matches its call sites."""
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Print one row of the matrix per line.

    Fix: the obfuscated body stringified the whole parameter for every row
    (``str(lowerCamelCase)`` inside ``for line in matrix``); it now prints
    each row, which is the only reading that uses the loop variable.
    """
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices via Strassen.

    Assumes both operands are square with the same power-of-two dimension;
    ``strassen`` pads arbitrary inputs before calling this.  Restores the def
    name used by its recursive call sites and the standard names for the
    seven Strassen products, matching the call pattern of the obfuscated body.
    """
    # Base case: direct 2x2 product.
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # The seven Strassen products (instead of the naive eight).
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    # Recombine the products into the four result quadrants.
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # Stitch the quadrants back into one matrix.
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of arbitrary compatible sizes via Strassen.

    Pads both operands with zeros up to the next power-of-two square size,
    runs :func:`actual_strassen`, then trims the padding from the result.

    Fixes in this revision:
    - def name restored to match the demo call below;
    - the early return that handed back ``[matrix1, matrix2]`` unmultiplied
      for certain shapes is removed — the padding path handles those shapes
      and returns the actual product;
    - padding now works on row copies so the caller's matrices are no longer
      mutated in place.

    Raises:
        Exception: if the inner dimensions do not match.
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # Next power of two covering every dimension; at least 2 so the 2x2 base
    # case of actual_strassen is always reachable.
    maximum = max(2, *dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # Copy rows so padding does not mutate the caller's data.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Pad with zeros so both operands become maxim x maxim.
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Trim the padding: the true product is dimension1[0] x dimension2[1].
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix and print the product.
    # Fix: both operands were previously bound to the same obfuscated name and
    # the call passed one (undefined) name twice; distinct names restored.
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 39
| 1
|
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
# Module logger plus the docstring constants consumed below. The metric class
# at the bottom of this file references `_CITATION`, `_DESCRIPTION` and
# `_KWARGS_DESCRIPTION` by these exact names; previously every assignment here
# rebound a single throwaway identifier, leaving those references undefined.
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
    author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
    title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
    year = {2019},
    booktitle = {Proceedings of the 57th Annual Meeting of
        the Association for Computational Linguistics (Volume 1: Long Papers)},
    publisher = {Association for Computational Linguistics},
    address = {Florence, Italy},
}

@inproceedings{10.3115/1072399.1072405,
    author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
    title = {A Model-Theoretic Coreference Scoring Scheme},
    year = {1995},
    isbn = {1558604022},
    publisher = {Association for Computational Linguistics},
    address = {USA},
    url = {https://doi.org/10.3115/1072399.1072405},
    doi = {10.3115/1072399.1072405},
    booktitle = {Proceedings of the 6th Conference on Message Understanding},
    pages = {45–52},
    numpages = {8},
    location = {Columbia, Maryland},
    series = {MUC6 ’95}
}

@INPROCEEDINGS{Bagga98algorithmsfor,
    author = {Amit Bagga and Breck Baldwin},
    title = {Algorithms for Scoring Coreference Chains},
    booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
    year = {1998},
    pages = {563--566}
}

@INPROCEEDINGS{Luo05oncoreference,
    author = {Xiaoqiang Luo},
    title = {On coreference resolution performance metrics},
    booktitle = {In Proc. of HLT/EMNLP},
    year = {2005},
    pages = {25--32},
    publisher = {URL}
}

@inproceedings{moosavi-strube-2016-coreference,
    title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
    author = "Moosavi, Nafise Sadat and
      Strube, Michael",
    booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = aug,
    year = "2016",
    address = "Berlin, Germany",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P16-1060",
    doi = "10.18653/v1/P16-1060",
    pages = "632--642",
}
'''

_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].

This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column  Type    Description
1   Document ID This is a variation on the document filename
2   Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3   Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5   Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7   Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8   Predicate Frameset ID   This is the PropBank frameset ID of the predicate in Column 7.
9   Word sense  This is the word sense of the word in Column 3.
10  Speaker/Author  This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities  These columns identifies the spans representing various named entities.
12:N    Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N   Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html

Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md

CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''

_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one,
        are considered as singletons. The default evaluation mode will include
        singletons in evaluations if they are included in the key or the system files.
        By setting \'keep_singletons=False\', all singletons in the key and system files
        will be excluded from the evaluation.
    NP_only: Most of the recent coreference resolvers only resolve NP mentions and
        leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
    min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
        Minimum spans are determined using the MINA algorithm.

Returns:
    \'mentions\': mentions
    \'muc\': MUC metric [Vilain et al, 1995]
    \'bcub\': B-cubed [Bagga and Baldwin, 1998]
    \'ceafe\': CEAFe [Luo et al., 2005]
    \'lea\': LEA [Moosavi and Strube, 2016]
    \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)

Examples:

    >>> coval = datasets.load_metric(\'coval\')
    >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
    ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
    ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
    ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
    ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
    ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
    >>> references = [words]
    >>> predictions = [words]
    >>> results = coval.compute(predictions=predictions, references=references)
    >>> print(results) # doctest:+ELLIPSIS
    {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Extract coreference-evaluation inputs for a single document.

    Parses the key (gold) and system CoNLL lines with the ``coval`` reader and
    returns a dict mapping ``doc`` to the tuple
    ``(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)``
    expected by ``coval.eval.evaluator.evaluate_documents``.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # NOTE(review): the *key* parse trees are used for the system clusters
        # too — this matches upstream coval; confirm if it looks surprising.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Run each ``(name, metric)`` pair in `metrics` over one key/sys document pair.

    Returns a dict of ``{name}/recall``, ``{name}/precision`` and ``{name}/f1``
    scores, plus ``conll_score`` (average F1 of MUC, B-cubed and CEAFe, scaled
    to 0-100) when all three CoNLL subparts were computed.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            # Only these three metrics contribute to the averaged CoNLL score.
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the key (gold) CoNLL lines carry parse-bit annotation.

    Scans non-comment lines (those not starting with "#"); for a line with
    more than six columns, a 6th column (index 5) other than "-" means gold
    parses are present. A short non-comment line ends the scan immediately.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase__(datasets.Metric):
    """CoVal coreference metric wrapper (MUC, B-cubed, CEAFe, LEA, CoNLL score).

    The previous version defined both hooks under one duplicated method name
    with duplicated parameter names (a SyntaxError); they are restored to the
    ``_info``/``_compute`` hooks the ``datasets.Metric`` API calls.
    """

    def _info(self):
        # Metric metadata: both inputs are sequences of CoNLL-formatted lines.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        """Score `predictions` against `references` with all CoVal sub-metrics."""
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            # min_span needs gold parse annotation in the reference lines.
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
| 39
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase : Optional[Any] = '''scheduler_config.json'''
class UpperCAmelCase__ ( UpperCamelCase__ ):
    # NOTE(review): this reads like a machine-scrambled enum of scheduler
    # types with values 1..14. Every member name was rewritten to the same
    # identifier `a`, so each line simply rebinds the one class attribute and
    # only the final binding (`a = 14`) survives class creation. The original
    # member names cannot be recovered from this file and must be restored
    # from upstream before the class is usable — TODO confirm against source.
    a : str = 1
    a : Optional[int] = 2
    a : int = 3
    a : Union[str, Any] = 4
    a : int = 5
    a : Optional[int] = 6
    a : str = 7
    a : List[Any] = 8
    a : List[str] = 9
    a : List[str] = 1_0
    a : int = 1_1
    a : Any = 1_2
    a : Any = 1_3
    a : Tuple = 1_4
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
    # Output container for a scheduler step holding one tensor field.
    # NOTE(review): the field/class/base names appear machine-renamed
    # (presumably `SchedulerOutput.prev_sample` upstream) — confirm before use.
    a : torch.FloatTensor
class UpperCAmelCase__ :
    """Mixin providing config-based save/load and compatibility discovery for
    schedulers.

    The previous version bound all three class attributes to one name `a`
    (so ``cls._compatibles`` below was undefined), discarded the results of
    ``load_config``, and used duplicated parameter names (a SyntaxError).
    """

    # File name used by `save_config` / `load_config`.
    config_name = SCHEDULER_CONFIG_NAME
    # Names of scheduler classes this scheduler is compatible with.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler from a saved `scheduler_config.json`."""
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write this scheduler's configuration to `save_directory`."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """List of compatible scheduler classes resolvable from the library root."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names (including this class) to actual
        # classes exported by the top-level package, skipping missing ones.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 39
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the SEW model: the module is replaced with a
# _LazyModule proxy so torch-dependent symbols are imported only on first
# access. Previously the dict and the list below were bound to a throwaway
# name, leaving `_import_structure` (used at the bottom) undefined.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with the lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39
|
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase : List[Any] = get_logger(__name__)
class ExtractManager:
    """Resolves cache-relative extraction output paths and delegates the actual
    decompression to `Extractor`.

    The previous version discarded every instance-attribute assignment and
    defined three methods under one duplicated name, so the attributes and
    helpers referenced in `extract` did not exist.
    """

    def __init__(self, cache_dir: Optional[str] = None):
        # Directory where extracted archives are cached.
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        # Re-extract when forced, or when nothing usable exists at output_path
        # (neither a file nor a non-empty directory).
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract `input_path` into the cache if it is a known archive format;
        return the extracted path, or `input_path` unchanged otherwise."""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    """Interface for archive extractors: a format check plus an extraction.

    The previous version used one duplicated method name and a parameter name
    duplicated into `**kwargs` (a SyntaxError); names are restored to the ones
    the rest of this file calls (`is_extractable`, `extract`).
    """

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Extractor base that detects its format by file magic numbers.

    Subclasses fill `magic_numbers`; `is_extractable` reads just enough bytes
    from the file to compare against them. (Class and attribute names restored:
    `cls.magic_numbers` and `MagicNumberBaseExtractor.read_magic_number` are
    referenced elsewhere in this file but were previously never defined.)
    """

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        # Read only the prefix needed for magic-number comparison.
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    """Tar archive extractor with path-traversal protection on members."""

    @classmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Yield only tar members whose resolved target stays inside
        `output_path`, logging and skipping illegal paths, symlinks and hard
        links that escape it (CVE-2007-4559-style traversal)."""

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path, output_path) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    """Gzip single-file extractor (magic number 1F 8B)."""

    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        # Stream-decompress to the output file without loading it in memory.
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    """Zip archive extractor with a stricter `is_extractable` check."""

    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path, magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path, output_path) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    """XZ (LZMA) single-file extractor."""

    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    """RAR archive extractor; requires the optional `rarfile` package."""

    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    """Zstandard single-file extractor; requires the optional `zstandard` package."""

    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class BzipaExtractor(MagicNumberBaseExtractor):
    """Bzip2 single-file extractor (magic number "BZh")."""

    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        # Local import: the module-level `import bza` at the top of this file
        # is a corrupted spelling of the stdlib `bz2` module.
        import bz2

        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    """7z archive extractor; requires the optional `py7zr` package.

    Fixes the corrupted `import pyazr` — the error message and the
    `config.PY7ZR_AVAILABLE` flag both identify the package as `py7zr`.
    """

    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class LzaExtractor(MagicNumberBaseExtractor):
    """LZ4 frame extractor; requires the optional `lz4` package.

    Fixes the corrupted `import lza.frame` — the error message and the
    `config.LZ4_AVAILABLE` flag both identify the package as `lz4`.
    """

    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    """Dispatcher that maps format names to extractor classes and infers the
    format of a file from its magic number.

    Restored names: `extractors`, `_get_magic_number_max_length`,
    `_read_magic_number` and `infer_extractor_format` are all referenced within
    this file but were previously never defined; several methods also had
    duplicated parameter names (a SyntaxError).
    """

    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        # Longest magic number among all magic-number-based extractors.
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path, magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path) -> str:  # <Added version="2.4.0"/>
        # Read once, then test each registered format against the same prefix.
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path,
        output_path,
        extractor_format: Optional[str] = None,
        extractor="deprecated",
    ) -> None:
        """Extract `input_path` to `output_path` using `extractor_format`
        (or, deprecated, an explicit `extractor` / sniffed format)."""
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 39
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for MBart: each optional backend (sentencepiece,
# tokenizers, torch, tf, flax) contributes its symbols only when available.
# Previously every list was bound to a throwaway name, leaving
# `_import_structure` (used at the bottom) undefined.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with the lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """Abstract base class for generation constraints.

    Subclasses implement advance/does_advance/update/reset/remaining/copy;
    `__init__` runs a self-consistency test of that contract. Method names are
    restored: the body of `test` calls them via `self.*`, but every method was
    previously defined under one duplicated name.
    """

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Drive the subclass through its own protocol and raise on inconsistency."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(UpperCamelCase__):
    """Constraint forcing an exact sequence of token ids to appear in order.

    The class name is restored from the self-reference in `copy`; the state
    attributes (`token_ids`, `seqlen`, `fulfilled_idx`, `completed`) were
    previously never bound even though every method reads them.
    """

    def __init__(self, token_ids):
        # NOTE(review): upstream deliberately skips the base-class __init__
        # (and its self.test()) via super(<base>, self) — preserved here.
        super(UpperCamelCase__, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """Next expected token id, or None once the phrase is complete."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        """True if `token_id` is exactly the next token of the phrase."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        """Consume `token_id`; return (stepped, completed, reset) flags."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        """Forget all progress through the phrase."""
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        """Number of phrase tokens not yet matched."""
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        """Return a fresh constraint over the same phrase; copy progress if `stateful`."""
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    """Prefix trie over several token-id sequences; tracks which ids can
    legally extend a partial sequence.

    NOTE(review): the mangled original was a SyntaxError (duplicate parameter
    name in `__init__`) and `count_leaves` recursed on its own argument
    (infinite recursion); identifiers restored from the names the bodies still
    referenced (`root`, `next_nodes`, `nested_token_ids`).
    """

    def __init__(self, nested_token_ids, no_subsets=True):
        """Build the trie.

        Raises ValueError when `no_subsets` is True and one sequence is a
        complete prefix of another (leaf count < sequence count).
        """
        self.max_height = max(len(one) for one in nested_token_ids)
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for token_id in token_ids:
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        """Token ids that may follow `current_seq` in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        return list(start.keys())

    def reached_leaf(self, current_seq):
        """True when `current_seq` ends at a leaf, i.e. a full sequence matched."""
        return len(self.next_tokens(current_seq)) == 0

    def count_leaves(self, root):
        """Number of leaves in the subtree rooted at `root`."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        # recurse into each child (the mangled original recursed on `root`).
        return sum(self.count_leaves(nn) for nn in next_nodes)

    def has_subsets(self, trie, nested_token_ids):
        """True when some sequence is a strict prefix of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint satisfied by generating any one of several token-id
    sequences, tracked with a :class:`DisjunctiveTrie`.

    NOTE(review): identifiers restored from a machine-mangled original (the
    class already referenced `DisjunctiveTrie` by name).  Base class name
    `Constraint` inferred from the abstract class above — confirm against the
    top of the file.
    """

    def __init__(self, nested_token_ids):
        """Validate the nested list of candidate phrases and build the trie."""
        super().__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """List of token ids that would make progress, or None when done."""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        return token_list

    def does_advance(self, token_id):
        """True when `token_id` extends the current partial sequence."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        """Consume `token_id`; return (stepped, completed, reset)."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        """Drop all progress."""
        self.completed = False
        self.current_seq = []

    def remaining(self):
        """Tokens still needed; can complete before max height, hence 0 check."""
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        """Fresh constraint over the same phrases; copies progress if `stateful`."""
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """Tracks fulfilment progress across a list of constraints during beam
    search: at most one constraint is "in progress" at a time, the rest are
    pending or complete.

    NOTE(review): identifiers restored from a machine-mangled original; the
    method bodies still referenced `self.pending_constraints`,
    `self.inprogress_constraint`, `self.complete_constraints`, etc., which
    fixes the intended attribute names.
    """

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        """Reset to "nothing fulfilled": all constraints pending, none active."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score of progress: full credit per completed constraint plus partial
        credit for the in-progress one."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Token ids that would advance some constraint, or None if none."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        """Re-derive state from scratch by replaying `token_ids` (may be None)."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        """Feed one generated token; return (complete, stepped)."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        """Copy of this state; copies per-constraint progress when `stateful`."""
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 39
| 1
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup  # was `from bsa import ...` — no such module; bs4 is the real package
def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldometers.info and return {statistic name: value} for the
    world COVID-19 counters.

    The function name matches the call in the `__main__` guard below (the
    mangled original defined `__lowerCAmelCase`, so the script could not run).
    Network I/O: raises whatever `requests.get` raises on failure.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Headline counters: <h1> titles paired with the big "maincounter" numbers.
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    # Secondary panel statistics.
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    # Pretty-print the scraped {statistic name: value} mapping; the first
    # print uses ANSI escapes for a bold title.
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covidaa_stats().items():
        print(f'{key}\n{value}\n')
| 39
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model, CPU) tests for `KandinskyImgaImgPipeline`.

    NOTE(review): the mangled original named every attribute `a` and every
    method `UpperCAmelCase_` (so later defs shadowed earlier ones); names are
    restored from the references the bodies still contained
    (`self.time_input_dim`, `self.dummy_movq_kwargs`, ...).  The mixin base
    `PipelineTesterMixin` comes from the visible import above.
    """

    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble all tiny sub-models plus a DDIM scheduler for the pipeline."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs (embeddings, init image, generator)."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        """End-to-end tiny run; compares a 3x3 output slice to golden values."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the real kandinsky-2-1 checkpoints.

    NOTE(review): method names restored from a mangled original (both methods
    were named `UpperCAmelCase_`); the first clearly overrides
    `unittest.TestCase.tearDown` (it calls `super().tearDown()`).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 39
| 1
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
# NOTE(review): constant names restored from a mangled original; the function
# bodies below already reference `transformers_module`, `_re_tf_models`, etc.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str) -> list:
    """Split a CamelCase identifier into its words.

    Restored name: `get_frameworks_table` below calls `camel_case_split`, but
    the mangled original defined `__lowerCAmelCase`.
    Example: "TFBertModel" -> ["TF", "Bert", "Model"].
    """
    # Lazy match up to a lower->Upper boundary, an Upper->UpperLower boundary,
    # or the end of the string.
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    """Build a DataFrame with one row per model type and columns flagging
    PyTorch/TensorFlow/Flax availability plus the preferred processor class.

    NOTE(review): local names restored from a mangled original (assignment
    targets were lost); the control flow and referenced module attributes are
    unchanged.
    """
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update `table` (model class -> (pipeline tag, auto class)) from the
    current PT/TF/Flax auto-model mappings; returns the same dict."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """Regenerate frameworks/pipeline-tags metadata and upload it to the
    `huggingface/transformers-metadata` dataset repo.

    `token` authenticates against the Hub; `commit_sha` (may be None) is
    referenced in the commit message.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Raise ValueError when a supported pipeline task is missing from
    `PIPELINE_TAGS_AND_AUTO_MODELS` (neither its tag nor its PT auto class
    is present)."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    # CLI entry point: either verify the pipeline-tag table or push updated
    # metadata.  Restored the `parser`/`args` names the body already used.
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 39
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

# Module-level logger (restored name: the mangled original lost the `logger`
# binding that example scripts conventionally use throughout).
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune from.

    NOTE(review): field names and defaults restored from a mangled original
    (all fields were named `a` with placeholder defaults); values follow the
    help strings, which survived intact.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and eval.

    NOTE(review): field names and defaults restored from a mangled original
    (all fields were named `a`); `__post_init__` restored from the mangled
    method that validates the file extensions.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Only csv/json inputs are supported by the preprocessing code.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase__ :
    """Data collator that dynamically pads a batch of multiple-choice features.

    Each incoming feature holds ``num_choices`` candidate sequences; the
    collator flattens them, pads with the tokenizer, then un-flattens back to
    shape (batch_size, num_choices, seq_len) and re-attaches the labels.

    NOTE(review): the four fields are all mangled to ``a`` (originally
    tokenizer, padding, max_length, pad_to_multiple_of), so the
    ``self.tokenizer``/``self.padding``/... reads below rely on attributes the
    dataclass no longer declares — confirm against the upstream run_swag
    example before use.
    """

    a : PreTrainedTokenizerBase
    a : Union[bool, str, PaddingStrategy] = True
    a : Optional[int] = None
    a : Optional[int] = None

    def __call__( self , UpperCamelCase ) -> Optional[int]:
        """Collate a list of multiple-choice feature dicts into one padded batch."""
        features = UpperCamelCase
        # Datasets may name the target column either "label" or "labels".
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["input_ids"] )
        # One sub-feature dict per answer choice, then flatten for padding.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        # Un-flatten back to (batch_size, num_choices, seq_len).
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels. BUGFIX: `torch.intaa` is not a torch dtype; cross-entropy
        # targets must be int64.
        batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
        return batch
def __lowerCAmelCase ( ):
    """End-to-end driver for multiple-choice fine-tuning on SWAG.

    Parses arguments, sets up logging and checkpoint resumption, loads the
    dataset/config/tokenizer/model, preprocesses examples into per-choice
    token sequences, then runs training and/or evaluation with ``Trainer``
    and finally pushes or records a model card.

    NOTE(review): throughout this function the repeated ``__lowerCAmelCase = ...``
    assignments are machine-mangled — each originally bound a distinct local
    (parser, model_args, raw_datasets, ...). Later lines read those original
    names (e.g. ``parser``, ``training_args``), which are therefore undefined
    in this mangled form. Do not rely on this file executing as-is.
    """
    __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag" , lowerCamelCase , lowerCamelCase )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    __lowerCAmelCase = training_args.get_process_log_level()
    logger.setLevel(lowerCamelCase )
    datasets.utils.logging.set_verbosity(lowerCamelCase )
    transformers.utils.logging.set_verbosity(lowerCamelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    __lowerCAmelCase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        __lowerCAmelCase = {}
        if data_args.train_file is not None:
            __lowerCAmelCase = data_args.train_file
        if data_args.validation_file is not None:
            __lowerCAmelCase = data_args.validation_file
        __lowerCAmelCase = data_args.train_file.split("." )[-1]
        __lowerCAmelCase = load_dataset(
            lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        __lowerCAmelCase = load_dataset(
            "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __lowerCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    __lowerCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    __lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    __lowerCAmelCase = [f'''ending{i}''' for i in range(4 )]
    __lowerCAmelCase = "sent1"
    __lowerCAmelCase = "sent2"
    if data_args.max_seq_length is None:
        __lowerCAmelCase = tokenizer.model_max_length
        if max_seq_length > 10_24:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`." )
            __lowerCAmelCase = 10_24
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
                f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
        __lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
    # Preprocessing the datasets.
    def preprocess_function(lowerCamelCase : Tuple ):
        # Repeat the shared first sentence once per answer choice, pair it with
        # each "sent2 + endingN" continuation, tokenize, then regroup per example.
        __lowerCAmelCase = [[context] * 4 for context in examples[context_name]]
        __lowerCAmelCase = examples[question_header_name]
        __lowerCAmelCase = [
            [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
        ]
        # Flatten out
        __lowerCAmelCase = list(chain(*lowerCamelCase ) )
        __lowerCAmelCase = list(chain(*lowerCamelCase ) )
        # Tokenize
        __lowerCAmelCase = tokenizer(
            lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="max_length" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset" )
        __lowerCAmelCase = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_train_samples )
            __lowerCAmelCase = train_dataset.select(range(lowerCamelCase ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            __lowerCAmelCase = train_dataset.map(
                lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset" )
        __lowerCAmelCase = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            __lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_eval_samples )
            __lowerCAmelCase = eval_dataset.select(range(lowerCamelCase ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            __lowerCAmelCase = eval_dataset.map(
                lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    __lowerCAmelCase = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
    )
    # Metric
    def compute_metrics(lowerCamelCase : Dict ):
        # Accuracy: fraction of examples whose argmax logit matches the label.
        __lowerCAmelCase , __lowerCAmelCase = eval_predictions
        __lowerCAmelCase = np.argmax(lowerCamelCase , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
    # Initialize our Trainer
    __lowerCAmelCase = Trainer(
        model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
    # Training
    if training_args.do_train:
        __lowerCAmelCase = None
        if training_args.resume_from_checkpoint is not None:
            __lowerCAmelCase = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            __lowerCAmelCase = last_checkpoint
        __lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        __lowerCAmelCase = train_result.metrics
        __lowerCAmelCase = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
        )
        __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) )
        trainer.log_metrics("train" , lowerCamelCase )
        trainer.save_metrics("train" , lowerCamelCase )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        __lowerCAmelCase = trainer.evaluate()
        __lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
        __lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) )
        trainer.log_metrics("eval" , lowerCamelCase )
        trainer.save_metrics("eval" , lowerCamelCase )
    __lowerCAmelCase = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowerCamelCase )
    else:
        trainer.create_model_card(**lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Tuple ):
    """TPU entry point for xla_spawn: ignores the worker index and runs main()."""
    main()
# Standard script entry point.
if __name__ == "__main__":
    main()
| 39
| 1
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
    """Variance-preserving SDE (score-based) scheduler, influenced by score_sde_pytorch.

    NOTE(review): this class is machine-mangled. ``__init__``,
    ``set_timesteps`` and ``step_pred`` declare several parameters all named
    ``UpperCamelCase`` (duplicate argument names are a SyntaxError in Python),
    and the ``__lowerCAmelCase = ...`` lines were originally assignments to the
    distinct locals later read (sigmas, timesteps, log_mean_coeff, std, score,
    dt, beta_t, drift, diffusion, x, x_mean, noise). Verify against the
    original diffusers ScoreSdeVpScheduler before use.
    """

    # Originally: `order = 1` (solver order of the scheduler).
    a : List[str] = 1
    @register_to_config
    def __init__( self , UpperCamelCase=2000 , UpperCamelCase=0.1 , UpperCamelCase=20 , UpperCamelCase=1E-3 ) -> Any:
        # Originally: num_train_timesteps, beta_min, beta_max, sampling_eps
        # stored via the config; sigmas/discrete_sigmas/timesteps start as None.
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Optional[int]:
        """Build the (descending 1 -> sampling_eps) continuous timestep grid."""
        __lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCamelCase , device=UpperCamelCase )
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ) -> Tuple:
        """One reverse-SDE predictor step; returns (x, x_mean)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        __lowerCAmelCase = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        __lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        __lowerCAmelCase = std.flatten()
        # Broadcast std up to the score's rank.
        while len(std.shape ) < len(score.shape ):
            __lowerCAmelCase = std.unsqueeze(-1 )
        __lowerCAmelCase = -score / std
        # compute
        __lowerCAmelCase = -1.0 / len(self.timesteps )
        __lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        __lowerCAmelCase = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            __lowerCAmelCase = beta_t.unsqueeze(-1 )
        __lowerCAmelCase = -0.5 * beta_t * x
        __lowerCAmelCase = torch.sqrt(UpperCamelCase )
        __lowerCAmelCase = drift - diffusion**2 * score
        __lowerCAmelCase = x + drift * dt
        # add noise
        __lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCamelCase , device=x.device , dtype=x.dtype )
        __lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__( self ) -> List[Any]:
        # Length of the scheduler == number of training timesteps.
        return self.config.num_train_timesteps
| 39
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase : List[str] = logging.get_logger(__name__)
# NOTE(review): the next three assignments all rebind the same mangled name;
# originally they were the distinct registries _FORMAT_TYPES,
# _FORMAT_TYPES_ALIASES and _FORMAT_TYPES_ALIASES_UNAVAILABLE read below.
lowerCAmelCase : Dict[Optional[str], Type[Formatter]] = {}
lowerCAmelCase : Dict[Optional[str], str] = {}
lowerCAmelCase : Dict[Optional[str], Exception] = {}
def __lowerCAmelCase ( lowerCamelCase : type , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None , ):
    """Register a Formatter subclass under a format type and optional aliases.

    NOTE(review): mangled — the three parameters share one name (SyntaxError),
    and the body reads the original names (formatter_cls, format_type,
    aliases) while the registry writes were turned into throwaway local
    assignments. Byte-preserved for reference only.
    """
    __lowerCAmelCase = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
    # Originally: _FORMAT_TYPES[format_type] = formatter_cls
    __lowerCAmelCase = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
        # Originally: _FORMAT_TYPES_ALIASES[alias] = format_type
        __lowerCAmelCase = format_type
def __lowerCAmelCase ( lowerCamelCase : Exception , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None ):
    """Record the error to raise when an unavailable format type is requested.

    NOTE(review): mangled — duplicate parameter names and a registry write
    (_FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error) reduced to
    a throwaway local assignment. Byte-preserved for reference only.
    """
    __lowerCAmelCase = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        __lowerCAmelCase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
# NOTE(review): `_register_formatter` / `_register_unavailable_formatter` and the
# `_torch_error` / `_tf_error` / `_jax_error` names referenced below were lost in the
# identifier mangling (the defs above are named `__lowerCAmelCase`) — this section
# mirrors the original datasets formatting registry but cannot run as-is.
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
# Tensor formatters are optional: register the formatter when the backend is
# importable, otherwise register the error to raise on request.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter
    _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    lowerCAmelCase : Optional[int] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
    _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter
    _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    lowerCAmelCase : str = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
    _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter
    _register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    lowerCAmelCase : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
    _register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def __lowerCAmelCase ( lowerCamelCase : Optional[str] ):
    """Resolve a format-type alias (e.g. "np", "pt") to its canonical name.

    Returns the input unchanged when it is not a registered alias.
    BUGFIX: the body previously read an undefined name ``format_type`` instead
    of its own parameter; it now uses the parameter it was given.
    """
    if lowerCamelCase in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[lowerCamelCase]
    return lowerCamelCase
def __lowerCAmelCase ( lowerCamelCase : Optional[str] , **lowerCamelCase : Tuple ):
    """Instantiate the Formatter registered for a (possibly aliased) format type.

    Raises the recorded backend error for unavailable types, or ValueError for
    unknown ones. NOTE(review): mangled — the positional and ** parameters
    share a name (SyntaxError), the resolved type is bound to a throwaway
    local while `format_type` is read undefined, and the sibling
    `get_format_type_from_alias` no longer exists under that name.
    Byte-preserved for reference only.
    """
    __lowerCAmelCase = get_format_type_from_alias(lowerCamelCase )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**lowerCamelCase )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
| 39
| 1
|
'''simple docstring'''
import numpy as np
def __lowerCAmelCase ( lowerCamelCase : np.array ):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x)).

    BUGFIX: the body previously read an undefined name ``vector`` instead of
    its own parameter.
    """
    return 1 / (1 + np.exp(-lowerCamelCase ))
def __lowerCAmelCase ( lowerCamelCase : np.array ):
    """Element-wise SiLU approximation of GELU: x * sigmoid(1.702 * x).

    BUGFIX: the body previously called an undefined name ``sigmoid`` on an
    undefined ``vector``; the sigmoid is now inlined using the parameter
    (x * 1/(1+exp(-1.702*x)) == x / (1+exp(-1.702*x))).
    """
    return lowerCamelCase / (1 + np.exp(-1.7_0_2 * lowerCamelCase ))
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module's docstrings.
    doctest.testmod()
| 39
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCAmelCase ( lowerCamelCase : Any ):
    """Build a FocalNetConfig from a checkpoint name (tiny/small/base/large/xlarge/huge, srf/lrf, fl3/fl4).

    NOTE(review): mangled — the parameter was originally ``model_name`` (read
    below as ``model_name``... here via the mangled name), and the repeated
    ``__lowerCAmelCase = ...`` assignments originally bound distinct locals
    (depths, use_conv_embed, use_layerscale, use_post_layernorm, focal_levels,
    focal_windows, embed_dim, repo_id, filename, idalabel, labelaid, config).
    The final FocalNetConfig(...) call reads those original names.
    """
    __lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    # The next three identical lines originally set three different flags
    # (use_conv_embed, use_layerscale, use_post_layernorm) — collapsed by mangling.
    __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
    __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
    __lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            __lowerCAmelCase = [3, 3, 3, 3]
            __lowerCAmelCase = [5, 5, 5, 5]
        elif "fl4" in model_name:
            __lowerCAmelCase = [4, 4, 4, 4]
            __lowerCAmelCase = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        __lowerCAmelCase = [3, 3, 3, 3]
        if "lrf" in model_name:
            __lowerCAmelCase = [3, 3, 3, 3]
        else:
            __lowerCAmelCase = [2, 2, 2, 2]
    # Embedding width per model size.
    if "tiny" in model_name:
        __lowerCAmelCase = 96
    elif "small" in model_name:
        __lowerCAmelCase = 96
    elif "base" in model_name:
        __lowerCAmelCase = 1_28
    elif "large" in model_name:
        __lowerCAmelCase = 1_92
    elif "xlarge" in model_name:
        __lowerCAmelCase = 2_56
    elif "huge" in model_name:
        __lowerCAmelCase = 3_52
    # set label information
    __lowerCAmelCase = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        __lowerCAmelCase = "imagenet-22k-id2label.json"
    else:
        __lowerCAmelCase = "imagenet-1k-id2label.json"
    __lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) )
    __lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
    __lowerCAmelCase = {v: k for k, v in idalabel.items()}
    __lowerCAmelCase = FocalNetConfig(
        embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , )
    return config
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
    """Map an original FocalNet state-dict key to the HF transformers naming scheme.

    BUGFIX: the body previously read an undefined ``name`` and discarded every
    ``.replace`` result into a throwaway local, so no renaming ever happened.
    The replacements now run on a working variable seeded from the parameter.
    """
    name = lowerCamelCase
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers" , "encoder.stages" )
    if "downsample.proj" in name:
        name = name.replace("downsample.proj" , "downsample.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f" , "modulation.projection_in" )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h" , "modulation.projection_context" )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj" , "modulation.projection_out" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    # Classifier head keys keep the "classifier" prefix; everything else is
    # nested under the "focalnet" backbone.
    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "focalnet." + name
    return name
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ):
    """Download an original FocalNet checkpoint, convert it to HF format, verify, and save/push.

    NOTE(review): mangled — parameters were originally (model_name,
    pytorch_dump_folder_path, push_to_hub), and each ``__lowerCAmelCase = ...``
    assignment bound a distinct local (model_name_to_url, checkpoint_url,
    state_dict, config, model, url, processor, image, inputs,
    image_transforms, original_pixel_values, outputs, predicted_class_idx,
    expected_slice) read by later lines. Network access is required
    (checkpoint + test image downloads).
    """
    __lowerCAmelCase = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    __lowerCAmelCase = model_name_to_url[model_name]
    print("Checkpoint URL: " , lowerCamelCase )
    __lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        __lowerCAmelCase = state_dict.pop(lowerCamelCase )
        __lowerCAmelCase = val
    __lowerCAmelCase = get_focalnet_config(lowerCamelCase )
    __lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase )
    model.eval()
    # load state dict
    model.load_state_dict(lowerCamelCase )
    # verify conversion
    __lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
    __lowerCAmelCase = BitImageProcessor(
        do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , )
    __lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
    __lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" )
    # Reference torchvision pipeline used to cross-check the processor output.
    __lowerCAmelCase = transforms.Compose(
        [
            transforms.Resize(2_56 ),
            transforms.CenterCrop(2_24 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    __lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 )
    __lowerCAmelCase = model(**lowerCamelCase )
    __lowerCAmelCase = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    # Golden logit slices per model for regression-checking the conversion.
    if model_name == "focalnet-tiny":
        __lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        __lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        __lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        __lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        __lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        __lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(lowerCamelCase )
        processor.save_pretrained(lowerCamelCase )
    if push_to_hub:
        print(f'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(f'''{model_name}''' )
        processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
    # CLI wrapper: parse the model name, optional dump folder and hub flag,
    # then run the conversion defined above.
    lowerCAmelCase : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''focalnet-tiny''',
        type=str,
        help='''Name of the FocalNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub.''',
    )
    # NOTE(review): `parser` and `convert_focalnet_checkpoint` are read here but
    # the mangling bound the parser to `lowerCAmelCase` and renamed the function.
    lowerCAmelCase : Optional[int] = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 39
| 1
|
'''simple docstring'''
def __lowerCAmelCase ( neighbours : list[int] , colored_vertices : list[int] , color : int ):
    """Return True iff assigning `color` to a vertex conflicts with no colored neighbour.

    `neighbours` is the adjacency row of the vertex (1 = edge present);
    `colored_vertices[i]` is the color of vertex i (-1 = uncolored).
    BUGFIX: the original declared three parameters with the same mangled name
    (a SyntaxError) while the body read the names restored here.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def __lowerCAmelCase ( lowerCamelCase : list[list[int]] , lowerCamelCase : int , lowerCamelCase : list[int] , lowerCamelCase : int ):
    """Backtracking helper: try to color vertices from `index` onward with `max_colors` colors.

    NOTE(review): mangled — four parameters share one name (SyntaxError);
    the body reads the originals (graph, max_colors via the range call,
    colored_vertices, index) and calls the siblings `valid_coloring` /
    `util_color`, both of which were renamed to `__lowerCAmelCase`.
    Byte-preserved for reference only.
    """
    if index == len(lowerCamelCase ):
        return True
    # Recursive Step
    for i in range(lowerCamelCase ):
        if valid_coloring(graph[index] , lowerCamelCase , lowerCamelCase ):
            # Color current vertex
            __lowerCAmelCase = i
            # Validate coloring
            if util_color(lowerCamelCase , lowerCamelCase , lowerCamelCase , index + 1 ):
                return True
            # Backtrack
            __lowerCAmelCase = -1
    return False
def __lowerCAmelCase ( lowerCamelCase : list[list[int]] , lowerCamelCase : int ):
    """Entry point: return a vertex coloring of `graph` using at most `max_colors` colors, or [].

    NOTE(review): mangled — the all -1 color list is bound to a throwaway
    local while `colored_vertices` and the sibling `util_color` are read
    undefined. Byte-preserved for reference only.
    """
    __lowerCAmelCase = [-1] * len(lowerCamelCase )
    if util_color(lowerCamelCase , lowerCamelCase , lowerCamelCase , 0 ):
        return colored_vertices
    return []
| 39
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
# NOTE(review): these module-level assignments all rebind the same mangled name;
# originally they were the distinct constants VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# PRETRAINED_INIT_CONFIGURATION referenced by the tokenizer class below.
lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Hub locations of the vocab/tokenizer files per pretrained checkpoint.
lowerCAmelCase : str = {
    '''vocab_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
        ),
        '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
        ),
    },
}
# Maximum input length supported by each checkpoint.
lowerCAmelCase : Optional[Any] = {
    '''squeezebert/squeezebert-uncased''': 5_1_2,
    '''squeezebert/squeezebert-mnli''': 5_1_2,
    '''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}
# Per-checkpoint tokenizer init overrides.
lowerCAmelCase : Tuple = {
    '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
    """Fast (tokenizers-backed) SqueezeBERT tokenizer.

    NOTE(review): mangled — ``__init__`` and the last three methods declare
    several parameters all named ``UpperCamelCase`` (duplicate argument names
    are a SyntaxError), and each ``__lowerCAmelCase = ...`` line originally
    assigned a distinct local/attribute (normalizer_state, normalizer_class,
    do_lower_case, output, sep, cls, files). Byte-preserved for reference;
    verify against transformers' SqueezeBertTokenizerFast.
    """

    # Class-level constants wired to the module-level maps above.
    a : Dict = VOCAB_FILES_NAMES
    a : Any = PRETRAINED_VOCAB_FILES_MAP
    a : Dict = PRETRAINED_INIT_CONFIGURATION
    a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a : Optional[Any] = SqueezeBertTokenizer
    def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> List[Any]:
        super().__init__(
            UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
        # Rebuild the backend normalizer if the stored options differ from the
        # requested lowercase / strip_accents / chinese-chars handling.
        __lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case
            or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars
        ):
            __lowerCAmelCase = getattr(UpperCamelCase , normalizer_state.pop("type" ) )
            __lowerCAmelCase = do_lower_case
            __lowerCAmelCase = strip_accents
            __lowerCAmelCase = tokenize_chinese_chars
            __lowerCAmelCase = normalizer_class(**UpperCamelCase )
        __lowerCAmelCase = do_lower_case
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> str:
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` input ids."""
        __lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
        """Build token-type ids: 0s for the first segment (+specials), 1s for the second."""
        __lowerCAmelCase = [self.sep_token_id]
        __lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]:
        """Save the backend vocabulary to a directory; returns the written file paths."""
        __lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
        return tuple(UpperCamelCase )
| 39
| 1
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __lowerCAmelCase ( func ):
    """Decorator: replace *func* with a wrapper returning its wall-clock run time.

    The wrapper discards ``func``'s return value and returns the elapsed
    seconds (float) measured with ``timeit.default_timer``.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)  # result intentionally discarded; only timing matters
        return timeit.default_timer() - starttime

    # Bug fix: the dump assigned ``func.__name__`` to a dead local instead of
    # propagating the name onto the wrapper for benchmark reporting.
    wrapper.__name__ = func.__name__
    return wrapper
def __lowerCAmelCase ( features : dict , num_examples : int = 100 , seq_shapes : dict = None ):
    """Generate ``num_examples`` random examples matching a *features* spec.

    Returns a list of ``(index, example_dict)`` tuples. ``seq_shapes`` maps a
    Sequence-typed column name to the numpy shape sampled for it.

    Bug fix: the dump collapsed every assignment target to one reused name;
    locals/parameters are restored from the references visible in the body
    (``features.items()``, ``seq_shapes[k]``, keyword call sites below).
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_name, feature in features.items():
            if isinstance(feature, _ArrayXD):
                example[col_name] = np.random.rand(*feature.shape).astype(feature.dtype)
            elif isinstance(feature, datasets.Value):
                if feature.dtype == "string":
                    example[col_name] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[col_name] = np.random.randint(10, size=1).astype(feature.dtype).item()
            elif isinstance(feature, datasets.Sequence):
                # Unwrap nested Sequence wrappers down to the inner Value.
                while isinstance(feature, datasets.Sequence):
                    feature = feature.feature
                shape = seq_shapes[col_name]
                example[col_name] = np.random.rand(*shape).astype(feature.dtype)
        dummy_data.append((i, example))
    return dummy_data
def __lowerCAmelCase ( dataset_path , features , num_examples : int = 100 , seq_shapes : dict = None ):
    """Write ``num_examples`` random examples to an Arrow file and load it back.

    Bug fix: mangled assignment targets restored. NOTE(review):
    ``generate_examples`` refers to the sibling generator above, whose
    definition name was mangled in this dump.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
    num_final_examples, num_bytes = writer.finalize()
    # Sanity check: everything generated must have been flushed to disk.
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 39
|
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase ( lowerCamelCase : list ):
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if the list is empty.
    """
    # Bug fix: the dump's body tested an undefined name ``nums`` instead of
    # the function's parameter.
    if not lowerCamelCase:
        raise ValueError("List is empty")
    return sum(lowerCamelCase) / len(lowerCamelCase)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 39
| 1
|
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def __lowerCAmelCase ( t_limit : int = 1_00_00_00 , n_limit : int = 10 ):
    """Project Euler 174: count tile totals ``t <= t_limit`` that form between
    1 and ``n_limit`` distinct hollow square laminae.

    A lamina with outer side ``outer_width`` and hole side ``hole_width``
    (same parity) uses ``outer_width**2 - hole_width**2`` tiles.

    Bug fixes: the counter factory was mangled (restored to
    ``defaultdict(int)``) and the final filter hard-coded 10, leaving the
    ``n_limit`` parameter unused.
    """
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole keeping the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # Hole and outer side must share parity for a symmetric lamina.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    # Bug fix: the demo referenced an undefined name; call the function above.
    print(f"{__lowerCAmelCase() = }")
| 39
|
'''simple docstring'''
import re
def __lowerCAmelCase ( phone : str ) -> bool:
    """Return True if *phone* is a valid Sri Lankan mobile number.

    Accepted prefixes: ``0``, ``94``, ``+94`` or ``0094``, followed by ``7X``
    (X in 0,1,2,4,5,6,7,8), an optional ``-``/space separator, then 7 digits.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    # Bug fix: the dump searched the phone string against itself and never
    # used the compiled pattern.
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    lowerCAmelCase = "0094702343221"
    # Bug fix: the demo referenced undefined names; call the function above.
    print(__lowerCAmelCase(lowerCAmelCase))
| 39
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE(review): the four class names below were mangled to one identifier in
# this dump (each rebinding shadows the previous); upstream these are distinct
# flax/transformers pipeline dummies. Bug fix applied to each: the metaclass
# referenced an undefined name, while ``DummyObject`` is imported above for
# exactly this purpose; the backends list is stored under ``_backends``, the
# attribute the dummy-object machinery conventionally reads (TODO confirm).
class UpperCAmelCase__ ( metaclass=DummyObject ):
    """Placeholder raising a helpful error when flax/transformers are missing."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class UpperCAmelCase__ ( metaclass=DummyObject ):
    """Placeholder raising a helpful error when flax/transformers are missing."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class UpperCAmelCase__ ( metaclass=DummyObject ):
    """Placeholder raising a helpful error when flax/transformers are missing."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class UpperCAmelCase__ ( metaclass=DummyObject ):
    """Placeholder raising a helpful error when flax/transformers are missing."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 39
|
'''simple docstring'''
import os
import sys
import unittest
# Repository root, three levels up from this test file; its ``utils``
# directory provides the ``get_test_info`` helper imported below.
# Bug fix: the dump bound the path to a throwaway name while the next line
# reads ``git_repo_path``.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

# Bug fix: both paths were assigned to one name, clobbering the BERT entry;
# give each test file its own identifier.
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase__ ( unittest.TestCase ):
    """Checks the mappings extracted by ``get_test_info`` for the BERT and
    BLIP modeling test files.

    NOTE(review): identifiers in this dump are machine-mangled — the three
    test methods all share the name ``UpperCAmelCase_`` (later definitions
    shadow earlier ones) and the ``UpperCamelCase`` arguments passed below are
    undefined at call time; upstream they are the module-level BERT/BLIP
    test-file paths. Code is left byte-identical pending a coordinated fix.
    """

    def UpperCAmelCase_ ( self ) -> str:
        # test-class -> tester-class mapping, for the BERT and BLIP files.
        __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
        __lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
        # Expected result for the BERT file.
        __lowerCAmelCase = {"BertModelTest": "BertModelTester"}
        # Expected result for the BLIP file.
        __lowerCAmelCase = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }
        # Mappings are compared as JSON for order-insensitive equality.
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        # model-class -> test-class mapping.
        __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
        __lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
        # Expected result for the BERT file.
        __lowerCAmelCase = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        # Expected result for the BLIP file.
        __lowerCAmelCase = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )

    def UpperCAmelCase_ ( self ) -> str:
        # model-class -> tester-class mapping.
        __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
        __lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
        # Expected result for the BERT file.
        __lowerCAmelCase = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        # Expected result for the BLIP file.
        __lowerCAmelCase = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
| 39
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """Builds a tiny BitConfig plus random inputs and runs shape checks.

    NOTE(review): names in this dump were machine-mangled (duplicate
    ``UpperCamelCase`` arguments are a SyntaxError, attribute-assignment
    targets were collapsed). Restored from the in-order defaults visible in
    the original signature, the attribute reads in the bodies, and the
    ``BitModelTester(...)`` / ``self.model_tester.*`` call sites preserved in
    the sibling test classes.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        # NOTE: mutable defaults mirror the upstream test helper; they are
        # only read, never mutated.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # The shape assertion implies an overall 32x spatial downsampling.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Config and model tests for Bit (BitModel, BitForImageClassification,
    BitBackbone).

    NOTE(review): this dump was machine-mangled (all methods shared one
    identifier, the mixin bases and boolean flag attributes were anonymized,
    and an inner function had duplicate parameter names — a SyntaxError).
    Restored to the conventional transformers test layout implied by the
    imports above and the preserved ``self.model_tester.*`` /
    ``self.config_tester.*`` call sites.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    # Bit is a conv net: no FX tracing, pruning, embedding resizing, head
    # masking or attention outputs.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: Bit has no extra common config properties.
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # Norm layers must start at identity (weight 1, bias 0).
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def __lowerCAmelCase ( ):
    """Load the standard COCO sample image used by the integration tests.

    Bug fix: the mangled dump returned an undefined name ``image``; bind the
    opened image to that name before returning it.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest( unittest.TestCase ):
    """Slow integration test: run the pretrained Bit classifier on a COCO image.

    NOTE(review): names restored from the preserved ``self.default_image_processor``
    and ``prepare_img()`` call sites; mangled locals rebuilt accordingly.
    """

    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        # NOTE(review): ``prepare_img`` is the module-level helper above (its
        # definition name is mangled in this dump).
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    """Runs the shared backbone test-suite against BitBackbone.

    NOTE(review): the mixin base and attribute names were anonymized in this
    dump; restored per the BackboneTesterMixin convention — confirm against
    the shared test mixin.
    """

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    # Bit is convolutional and produces no attention maps.
    has_attentions = False

    def setUp(self):
        # Bug fix: the dump bound the tester to a dead local instead of the
        # ``model_tester`` attribute the mixin reads.
        self.model_tester = BitModelTester(self)
| 39
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output container for the temporal transformer below.

    Bug fix: the class name and field were mangled in this dump — the model's
    forward constructs ``TransformerTemporalModelOutput(sample=...)``, and the
    base is the imported ``BaseOutput``.
    """

    # Processed hidden states returned by TransformerTemporalModel.forward.
    sample: torch.FloatTensor
class TransformerTemporalModel( ModelMixin , ConfigMixin ):
    """Transformer applied along the temporal (frame) axis of video latents.

    NOTE(review): parameter and attribute names in this dump were
    machine-mangled (duplicate arguments are a SyntaxError); they are restored
    from the in-order defaults of the original signature and the
    ``self.norm`` / ``self.proj_in`` / ``self.transformer_blocks`` /
    ``self.proj_out`` reads preserved in ``forward``.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels=None,
        out_channels=None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim=None,
        attention_bias: bool = False,
        sample_size=None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        # Group-norm over channels, then project into the transformer width.
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Apply temporal attention; input is (batch*frames, channel, h, w)."""
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        # Regroup frames into an explicit time axis and normalize.
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        # Flatten spatial positions; attention runs over the frame axis.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
| 39
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCAmelCase ( num : int ):
    """Return all primes <= ``num`` via the Sieve of Eratosthenes.

    Raises:
        ValueError: if ``num`` is not a positive integer.

    Bug fix: the dump collapsed every local (message, sieve, prime, start,
    end) into one reused name and mangled the inner range step; the standard
    sieve locals are restored.
    """
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    # Everything above sqrt(num) still marked True is prime.
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime


if __name__ == "__main__":
    # Bug fix: the demo referenced an undefined name; call the function above.
    print(__lowerCAmelCase(int(input("Enter a positive integer: ").strip())))
| 39
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCAmelCase ( bpayload : bytes , sampling_rate : int ):
    """Decode in-memory audio bytes to a mono float32 waveform via ffmpeg.

    Pipes ``bpayload`` into an ffmpeg subprocess and reads back raw ``f32le``
    samples at ``sampling_rate``.

    Raises:
        ValueError: if ffmpeg is missing or the payload decodes to nothing.

    Bug fix: mangled locals restored (including ``np.float32``, which the
    dump rendered as ``np.floataa``).
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def __lowerCAmelCase ( sampling_rate : int , chunk_length_s : float , format_for_conversion : str = "f32le" ):
    """Yield raw microphone audio chunks captured through ffmpeg.

    Picks the platform capture backend (alsa / avfoundation / dshow) and
    streams fixed-size byte chunks of ``chunk_length_s`` seconds.

    Bug fix: mangled locals restored from the names preserved inside the
    command list (``format_``, ``input_``) and the ``_ffmpeg_stream`` call.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def __lowerCAmelCase (
    sampling_rate : int ,
    chunk_length_s : float ,
    stream_chunk_s = None ,
    stride_length_s = None ,
    format_for_conversion : str = "f32le" ,
):
    """Stream microphone audio as overlapping numpy chunks.

    Wraps the raw byte stream from ``ffmpeg_microphone`` with
    ``chunk_bytes_iter`` striding, converts each chunk to numpy, rewrites the
    stride from bytes to samples, and drops chunks when processing lags more
    than 10 chunk-durations behind real time.

    Bug fix: mangled locals restored from the preserved call sites
    (``ffmpeg_microphone``, ``chunk_bytes_iter``) and dict-key accesses;
    ``np.int16``/``np.float32`` were rendered as ``np.intaa``/``np.floataa``.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def __lowerCAmelCase ( iterator , chunk_len : int , stride , stream : bool = False ):
    """Re-chunk a byte iterator into overlapping ``chunk_len`` windows.

    ``stride`` is ``(stride_left, stride_right)`` in bytes; consecutive chunks
    overlap so downstream models can discard boundary artifacts. With
    ``stream=True`` partial (not-yet-full) chunks are yielded eagerly and
    tagged ``"partial": True``.

    Raises:
        ValueError: if the combined stride is not strictly smaller than
            ``chunk_len`` (the window would never advance).

    Bug fix: mangled assignment targets (accumulator, stride tuples, yielded
    item dict) restored from the preserved ``_stride_left`` references.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # the very first chunk has no left context
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region for the next window.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def __lowerCAmelCase ( ffmpeg_command , buflen : int ):
    """Yield successive ``buflen``-byte reads from an ffmpeg subprocess stdout.

    Raises:
        ValueError: if the ffmpeg binary is not installed.

    Bug fix: the mangled dump collapsed the pipe buffer size and command into
    one name; restored so ``bufsize`` configures the pipe and ``buflen``
    sizes each read.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 39
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.