code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__(metaclass=DummyObject):
    """Import-time placeholder used when `torch`/`torchsde` are not installed.

    Any attempt to construct or load it raises via ``requires_backends``.
    NOTE(review): the obfuscated source used ``metaclass=_UpperCAmelCase`` (an
    undefined name) and stored the backend list under a mangled attribute;
    ``DummyObject`` (imported above) reads the list from ``_backends``.
    """

    # Backends that must be installed before the real object can be used.
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # Mirrors the real scheduler's `from_config` alternate constructor.
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Mirrors the real scheduler's `from_pretrained` alternate constructor.
        requires_backends(cls, ["torch", "torchsde"])
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Import structure consumed by `_LazyModule`: submodule name -> exported names.
# NOTE(review): the obfuscated source assigned every piece to `_lowercase`,
# leaving `_import_structure` (used at the bottom) undefined; restored here.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # tokenizers not installed: skip exporting the fast tokenizer
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch not installed: skip exporting the modeling classes
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    # NOTE(review): the obfuscated source bound the proxy to `_lowercase`
    # instead of installing it in sys.modules, which would disable lazy loading.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode *data* to Base64, mirroring ``base64.b64encode``.

    >>> base64_encode(b"Hello World!")
    b'SGVsbG8gV29ybGQh'

    Raises:
        TypeError: if *data* is not a bytes-like object.
    """
    # Make sure the supplied data is a bytes-like object.
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    # One long string of the 8-bit binary representation of every byte.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The "=" padding that will be appended later (one "=" per 2 missing bits).
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append arbitrary binary digits (0's) so the length is a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character.
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decode Base64 *encoded_data* (str or ASCII bytes) back to raw bytes.

    >>> base64_decode("SGVsbG8gV29ybGQh")
    b'Hello World!'

    Raises:
        TypeError: if *encoded_data* is neither str nor bytes.
        ValueError: if a bytes input is not valid ASCII/UTF-8.
        AssertionError: on invalid characters or incorrect padding
            (kept as ``assert`` to match the original contract).
    """
    # Make sure encoded_data is either a string or a bytes-like object.
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters, then work on it as a string.
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check that the encoded string contains only base64 characters.
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding.
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove the padding, and drop the 2 filler bits it stands for per "=".
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    # Regroup the bit stream into bytes.
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 30
|
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Interactive entry point: prompt for a message, key, and mode, then print the result."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* with the Vigenère cipher under *key*."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt a Vigenère-ciphered *message* under *key*."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of *message* by the corresponding key letter.

    Non-letters pass through unchanged and do not advance the key position;
    case of each input letter is preserved.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Wrap the key around once it is exhausted.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
| 30
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( _UpperCAmelCase):
    """Unit tests for ``IPNDMScheduler`` (config round-trips, step equivalence, full loop).

    NOTE(review): identifiers in this class were machine-mangled. Locals are
    written to ``lowercase_`` but read back under their original names
    (``config``, ``sample``, ``residual``, ``scheduler``, ``new_scheduler``,
    ``dummy_past_residuals``, ``num_inference_steps``, ``time_step``, ...), so
    this code raises NameError as written. ``_UpperCAmelCase`` is presumably
    ``SchedulerCommonTest`` (imported above) — confirm against upstream.
    Indentation reconstructed; tokens unchanged.
    """
    # Scheduler classes under test (original name presumably `scheduler_classes`).
    UpperCamelCase__ = (IPNDMScheduler,)
    # Default kwargs for `step` (presumably `forward_default_kwargs`).
    # NOTE(review): this rebinding clobbers the attribute above — both were
    # renamed to the same mangled identifier.
    UpperCamelCase__ = (('''num_inference_steps''', 50),)
    def SCREAMING_SNAKE_CASE_ ( self : Dict , **lowercase_ : Dict ):
        # Build a scheduler config dict merged with caller overrides.
        # NOTE(review): `config` is presumably the mangled assignment above it.
        lowercase_ : Tuple = {"""num_train_timesteps""": 1000}
        config.update(**lowercase_ )
        return config
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Optional[Any]=0 , **lowercase_ : Any ):
        # Round-trip each scheduler through save_config/from_pretrained and
        # check `step` outputs are identical. NOTE(review): duplicate
        # parameter names make this signature invalid as written.
        lowercase_ : Union[str, Any] = dict(self.forward_default_kwargs )
        lowercase_ : List[Any] = kwargs.pop("""num_inference_steps""" , lowercase_ )
        lowercase_ : Any = self.dummy_sample
        lowercase_ : Optional[Any] = 0.1 * sample
        lowercase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            lowercase_ : Optional[Any] = self.get_scheduler_config(**lowercase_ )
            lowercase_ : List[str] = scheduler_class(**lowercase_ )
            scheduler.set_timesteps(lowercase_ )
            # copy over dummy past residuals
            lowercase_ : Optional[int] = dummy_past_residuals[:]
            if time_step is None:
                # Default to a timestep in the middle of the schedule.
                lowercase_ : List[str] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase_ )
                lowercase_ : Any = scheduler_class.from_pretrained(lowercase_ )
                new_scheduler.set_timesteps(lowercase_ )
                # copy over dummy past residuals
                lowercase_ : Dict = dummy_past_residuals[:]
            lowercase_ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            lowercase_ : Any = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            lowercase_ : str = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            lowercase_ : Dict = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Intentionally a no-op: disables a base-class test not applicable here.
        pass
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int=0 , **lowercase_ : Any ):
        # Same save/load round-trip as above, but with the default config.
        lowercase_ : Any = dict(self.forward_default_kwargs )
        lowercase_ : Dict = kwargs.pop("""num_inference_steps""" , lowercase_ )
        lowercase_ : Tuple = self.dummy_sample
        lowercase_ : Optional[Any] = 0.1 * sample
        lowercase_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            lowercase_ : Any = self.get_scheduler_config()
            lowercase_ : Any = scheduler_class(**lowercase_ )
            scheduler.set_timesteps(lowercase_ )
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase_ : Any = dummy_past_residuals[:]
            if time_step is None:
                lowercase_ : int = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase_ )
                lowercase_ : List[Any] = scheduler_class.from_pretrained(lowercase_ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowercase_ )
                # copy over dummy past residual (must be after setting timesteps)
                lowercase_ : Union[str, Any] = dummy_past_residuals[:]
            lowercase_ : int = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            lowercase_ : Dict = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            lowercase_ : Union[str, Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            lowercase_ : Optional[Any] = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : List[Any] ):
        # Run the full denoising loop (twice over the timesteps, as IPNDM's
        # multistep warm-up requires) and return the final sample.
        lowercase_ : str = self.scheduler_classes[0]
        lowercase_ : str = self.get_scheduler_config(**lowercase_ )
        lowercase_ : Dict = scheduler_class(**lowercase_ )
        lowercase_ : List[Any] = 10
        lowercase_ : int = self.dummy_model()
        lowercase_ : Any = self.dummy_sample_deter
        scheduler.set_timesteps(lowercase_ )
        for i, t in enumerate(scheduler.timesteps ):
            lowercase_ : str = model(lowercase_ , lowercase_ )
            lowercase_ : str = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            lowercase_ : Dict = model(lowercase_ , lowercase_ )
            lowercase_ : Dict = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
        return sample
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Two consecutive `step` calls must preserve the sample's shape.
        lowercase_ : Optional[int] = dict(self.forward_default_kwargs )
        lowercase_ : Union[str, Any] = kwargs.pop("""num_inference_steps""" , lowercase_ )
        for scheduler_class in self.scheduler_classes:
            lowercase_ : Optional[int] = self.get_scheduler_config()
            lowercase_ : Tuple = scheduler_class(**lowercase_ )
            lowercase_ : List[str] = self.dummy_sample
            lowercase_ : Dict = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowercase_ , """set_timesteps""" ):
                scheduler.set_timesteps(lowercase_ )
            elif num_inference_steps is not None and not hasattr(lowercase_ , """set_timesteps""" ):
                lowercase_ : List[Any] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            lowercase_ : Tuple = dummy_past_residuals[:]
            lowercase_ : int = scheduler.timesteps[5]
            lowercase_ : List[Any] = scheduler.timesteps[6]
            lowercase_ : str = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            lowercase_ : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            # NOTE(review): comparing `output_a` to itself — presumably the
            # mangling collapsed `output_0`/`output_1` into one name.
            self.assertEqual(output_a.shape , output_a.shape )
            lowercase_ : Dict = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            lowercase_ : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Exercise config round-trips over several training-timestep counts.
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=lowercase_ , time_step=lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # Exercise forward round-trips over (time_step, num_inference_steps) pairs.
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=lowercase_ , time_step=lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Regression check on the mean magnitude of the full-loop output.
        lowercase_ : Optional[Any] = self.full_loop()
        lowercase_ : str = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_mean.item() - 2540529 ) < 10
| 30
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
    """Tests for ``BarkProcessor`` (save/load, voice presets, tokenizer parity).

    NOTE(review): identifiers were machine-mangled. ``setUp`` writes every
    fixture to the local ``lowercase_`` instead of ``self`` attributes, yet the
    other methods read ``self.checkpoint``, ``self.tmpdirname``,
    ``self.voice_preset``, ``self.input_string``,
    ``self.speaker_embeddings_dict_path`` and
    ``self.speaker_embeddings_directory`` — restore those assignments before
    running. Indentation reconstructed; tokens unchanged.
    """
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # setUp: checkpoint, temp dir, voice preset name, sample text, and
        # speaker-embedding paths (all mangled into a throwaway local).
        lowercase_ : List[Any] = """ylacombe/bark-small"""
        lowercase_ : List[str] = tempfile.mkdtemp()
        lowercase_ : Tuple = """en_speaker_1"""
        lowercase_ : Union[str, Any] = """This is a test string"""
        lowercase_ : int = """speaker_embeddings_path.json"""
        lowercase_ : Any = """speaker_embeddings"""
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
        # Tokenizer factory for the checkpoint under test.
        return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # tearDown: remove the temp directory created in setUp.
        shutil.rmtree(self.tmpdirname )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # save_pretrained/from_pretrained must preserve the tokenizer vocab.
        lowercase_ : Any = self.get_tokenizer()
        lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Round-trip with speaker embeddings and extra tokenizer kwargs.
        lowercase_ : Any = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Voice presets: pass a dict, an .npz file, and a hub preset name.
        lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        lowercase_ : Optional[int] = 35
        lowercase_ : int = 2
        lowercase_ : Union[str, Any] = 8
        lowercase_ : Union[str, Any] = {
            """semantic_prompt""": np.ones(lowercase_ ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : Dict = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(lowercase_ , **lowercase_ )
        lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : List[Any] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Processor output must match the bare tokenizer's encoding.
        lowercase_ : List[str] = self.get_tokenizer()
        lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
        lowercase_ : Any = processor(text=self.input_string )
        lowercase_ : List[str] = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 1
|
'''simple docstring'''
import operator as op

# NOTE(review): identifier mangling renamed every constant in this module to
# `_lowercase`, so each assignment below clobbers the previous one and only the
# final value survives at runtime. Upstream these are distinct names
# (checkpoint filenames, FSDP/DeepSpeed option lists, version pins, ...) —
# confirm against the original module before relying on any of them.
_lowercase : List[Any] = "scaler.pt"          # grad-scaler checkpoint filename
_lowercase : int = "pytorch_model"            # model checkpoint stem
_lowercase : str = "random_states"            # RNG-state checkpoint stem
_lowercase : int = "optimizer"                # optimizer checkpoint stem
_lowercase : List[Any] = "scheduler"          # LR-scheduler checkpoint stem
_lowercase : Tuple = "pytorch_model.bin"
_lowercase : Tuple = "pytorch_model.bin.index.json"
_lowercase : Dict = "model.safetensors"
_lowercase : List[Any] = "model.safetensors.index.json"
_lowercase : List[Any] = "1.10.2"             # minimum torch version — TODO confirm meaning
_lowercase : Optional[Any] = "py38"           # minimum Python tag
_lowercase : Optional[Any] = "4.17.0"         # minimum transformers version — TODO confirm
_lowercase : List[str] = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
# FSDP configuration option lists:
_lowercase : Any = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
_lowercase : Union[str, Any] = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
_lowercase : List[str] = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
_lowercase : List[str] = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
_lowercase : Dict = "2.0.1"                   # torch version gate for dynamo/compile — TODO confirm
_lowercase : Tuple = ["pdsh", "standard", "openmpi", "mvapich"]   # DeepSpeed multinode launchers
_lowercase : Dict = ["default", "reduce-overhead", "max-autotune"]  # torch.compile modes
# Comparison operators used for version checks.
_lowercase : Optional[int] = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_lowercase : Any = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]
# Distributed types supporting custom samplers / local SGD (names mangled).
_lowercase : Union[str, Any] = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
_lowercase : Any = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 30
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class __magic_name__(TaskTemplate):
    """Task template describing the column layout of an image-classification dataset.

    NOTE(review): the obfuscated source used the undefined ``_UpperCAmelCase``
    for both the ``frozen`` flag and the base class (restored to ``True`` and
    ``TaskTemplate``), and collapsed all five class attributes into one mangled
    name; the originals are recovered from the reads in the methods below.
    """

    # Canonical task name; always serialized even though it is the default.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    # Names of the dataset columns holding the image and the label.
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises ValueError when the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ to swap the schema.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self):
        """Map the configured column names onto the canonical ones."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 30
| 1
|
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __magic_name__ ( _UpperCAmelCase):
    """Slow end-to-end fine-tuning test: BERT-tiny encoder-decoder on CNN/DailyMail.

    NOTE(review): identifiers were machine-mangled — locals are written to
    ``lowercase_`` but read back under their original names (``bertabert``,
    ``tokenizer``, ``train_dataset``, ``val_dataset``, ``batch``, ``inputs``,
    ``outputs``, ``pred``, ``trainer``, ...), and several keyword values
    (``truncation=lowercase_``, ``batched=lowercase_``, ...) were presumably
    ``True`` before mangling. ``_UpperCAmelCase`` is presumably
    ``TestCasePlus`` (imported above). Indentation reconstructed; tokens
    unchanged.
    """
    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Model/tokenizer setup: tie a tiny BERT encoder and decoder together.
        lowercase_ : int = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
        lowercase_ : str = BertTokenizer.from_pretrained("""bert-base-uncased""" )
        lowercase_ : Optional[int] = bertabert.config.encoder.vocab_size
        lowercase_ : int = tokenizer.sep_token_id
        lowercase_ : int = tokenizer.cls_token_id
        lowercase_ : Tuple = 128
        # Tiny train/val splits to keep the test fast.
        lowercase_ : List[str] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
        lowercase_ : Any = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
        lowercase_ : int = train_dataset.select(range(32 ) )
        lowercase_ : List[Any] = val_dataset.select(range(16 ) )
        lowercase_ : Any = 4
        def _map_to_encoder_decoder_inputs(lowercase_ : List[Any] ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            lowercase_ : Any = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=lowercase_ , max_length=512 )
            lowercase_ : Optional[Any] = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=lowercase_ , max_length=128 )
            lowercase_ : Optional[int] = inputs.input_ids
            lowercase_ : List[Any] = inputs.attention_mask
            lowercase_ : Tuple = outputs.input_ids
            lowercase_ : List[Any] = outputs.input_ids.copy()
            # Mask pad tokens out of the loss with the ignore index -100.
            lowercase_ : Union[str, Any] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            lowercase_ : Union[str, Any] = outputs.attention_mask
            assert all(len(lowercase_ ) == 512 for x in inputs.input_ids )
            assert all(len(lowercase_ ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(lowercase_ : str ):
            # Exact-match accuracy between decoded predictions and labels.
            lowercase_ : Optional[int] = pred.label_ids
            lowercase_ : int = pred.predictions
            # all unnecessary tokens are removed
            lowercase_ : int = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
            lowercase_ : str = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
            lowercase_ : Any = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowercase_ ) )] ) / len(lowercase_ )
            return {"accuracy": accuracy}
        # map train dataset
        lowercase_ : Any = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=["""article""", """highlights"""] , )
        train_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        # same for validation dataset
        lowercase_ : Dict = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=["""article""", """highlights"""] , )
        val_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        lowercase_ : Any = self.get_auto_remove_tmp_dir()
        lowercase_ : Optional[Any] = SeqaSeqTrainingArguments(
            output_dir=lowercase_ , per_device_train_batch_size=lowercase_ , per_device_eval_batch_size=lowercase_ , predict_with_generate=lowercase_ , evaluation_strategy="""steps""" , do_train=lowercase_ , do_eval=lowercase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        lowercase_ : Dict = SeqaSeqTrainer(
            model=lowercase_ , args=lowercase_ , compute_metrics=_compute_metrics , train_dataset=lowercase_ , eval_dataset=lowercase_ , tokenizer=lowercase_ , )
        # start training
        trainer.train()
| 30
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download ``num_class_images`` regularization images for *class_prompt*.

    Queries the LAION knn service via ``ClipClient``, growing the request size
    by 1.5x until enough candidates are returned, then downloads images into
    ``{class_data_dir}/images`` while logging captions, URLs, and paths.

    NOTE(review): the obfuscated source declared three parameters all named
    ``UpperCAmelCase__`` (a SyntaxError) and shadowed this function's name;
    the names used here are recovered from the call site at the bottom.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Nothing to do if enough images were already downloaded.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until the service returns enough candidates
    # (capped at 10k to avoid unbounded requests).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate the payload decodes as an image before saving.
                    Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download loop: skip any broken URL/image.
                continue
    return


def parse_args():
    """Parse the retrieval script's command-line arguments."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
| 1
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
# Modalities a tool may declare for its inputs/outputs. NOTE(review): this is
# read as `authorized_types` inside the test mixin below — the constant's name
# was mangled; confirm before use.
_lowercase : Union[str, Any] = ["text", "image", "audio"]
def create_inputs(input_types):
    """Build one dummy input per requested modality ("text", "image", "audio").

    A nested list of types produces a nested list of inputs (built
    recursively). Raises ValueError for an unknown type string.

    NOTE(review): the obfuscated source named this function `lowerCamelCase`
    while recursing into it as `create_inputs`; the original name is restored.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs):
    """Classify each output as "text", "image", or "audio" by its runtime type.

    Raises ValueError for an output of any other type.

    NOTE(review): restored name `output_types` — it is called under that name
    elsewhere in this file; the obfuscated source shadowed it as
    `lowerCamelCase`. (The local list deliberately shadows the function name,
    matching the upstream code.)
    """
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class __magic_name__ :
    """Mixin of common checks for agent tools (expects `self.tool` on the subclass).

    NOTE(review): identifiers were machine-mangled — locals are written to
    ``lowercase_`` but read back as ``inputs``/``outputs``/``_inputs``, the
    ``isinstance(_input , lowercase_ )`` checks presumably tested against
    ``list``, and ``authorized_types`` is presumably the module-level modality
    list above. Indentation reconstructed; tokens unchanged.
    """
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # The tool must declare `inputs`/`outputs`, each an authorized modality.
        self.assertTrue(hasattr(self.tool , """inputs""" ) )
        self.assertTrue(hasattr(self.tool , """outputs""" ) )
        lowercase_ : List[str] = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , lowercase_ ):
                # Nested spec: every member modality must be authorized.
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        lowercase_ : Any = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Calling the tool on dummy inputs must yield the declared output types.
        lowercase_ : Any = create_inputs(self.tool.inputs )
        lowercase_ : List[str] = self.tool(*lowercase_ )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            lowercase_ : Any = [outputs]
        self.assertListEqual(output_types(lowercase_ ) , self.tool.outputs )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Every tool needs a description and a default checkpoint.
        self.assertTrue(hasattr(self.tool , """description""" ) )
        self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
        self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Outputs must be instances of their declared agent types.
        lowercase_ : List[Any] = create_inputs(self.tool.inputs )
        lowercase_ : Optional[int] = self.tool(*lowercase_ )
        if not isinstance(lowercase_ , lowercase_ ):
            lowercase_ : List[Any] = [outputs]
        self.assertEqual(len(lowercase_ ) , len(self.tool.outputs ) )
        for output, output_type in zip(lowercase_ , self.tool.outputs ):
            lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # The tool must also accept inputs pre-wrapped in agent types.
        lowercase_ : int = create_inputs(self.tool.inputs )
        lowercase_ : Dict = []
        for _input, input_type in zip(lowercase_ , self.tool.inputs ):
            if isinstance(lowercase_ , lowercase_ ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        lowercase_ : Dict = self.tool(*lowercase_ )
        if not isinstance(lowercase_ , lowercase_ ):
            lowercase_ : List[str] = [outputs]
        self.assertEqual(len(lowercase_ ) , len(self.tool.outputs ) )
| 30
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place with the (deliberately
    inefficient) slowsort algorithm.

    Defaults cover the whole list. NOTE(review): the obfuscated source
    declared three parameters all named ``UpperCAmelCase__`` (a SyntaxError)
    and recursed into the undefined name ``slowsort``; both are restored.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # Recursively sort both halves, bubble the maximum of the two halves to
    # `end`, then re-sort everything but that maximum.
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 30
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __magic_name__ ( unittest.TestCase):
    """Configuration holder for LayoutLMv3 image-processor tests.

    Stores the test geometry/OCR settings and builds the keyword dict used to
    instantiate the image processor.

    Bug fixed: the checked-in ``__init__`` declared every parameter with one
    duplicate name (a ``SyntaxError``) and bound only a throwaway local, so
    none of the attributes the rest of the suite reads (``self.batch_size``,
    ``self.size``, ...) were ever set. Parameter and attribute names are
    restored to the ones the class body actually reads.
    """

    def __init__(
        self ,
        parent ,
        batch_size=7 ,
        num_channels=3 ,
        image_size=18 ,
        min_resolution=30 ,
        max_resolution=400 ,
        do_resize=True ,
        size=None ,
        apply_ocr=True ,
    ):
        # Fall back to the processor's default target size when none is given.
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        """Return the image-processor kwargs described by this tester."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """Tests for ``LayoutLMvaImageProcessor``: resizing to the configured size
    and the optional Tesseract OCR pass that yields words + normalized boxes.

    NOTE(review): the bodies below are corrupted by automated renaming —
    assignment targets became ``lowercase_`` while later statements read the
    original names (``image_inputs``, ``image_processor``, ``encoding``,
    ``encoded_images``, ``ds``, ``expected_words``/``expected_boxes``), every
    method shares the name ``SCREAMING_SNAKE_CASE_`` (later defs overwrite
    earlier ones), and ``LayoutLMvaImageProcessingTester`` is not defined under
    that name in this file. Restore the original bindings before running.
    """

    # Class under test; None disables the suite when pytesseract is absent.
    UpperCamelCase__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        # setUp: create the kwargs helper shared by every test below.
        lowercase_ : Tuple = LayoutLMvaImageProcessingTester(self )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Keyword arguments used to build the processor in each test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # The processor must expose its three configuration attributes.
        lowercase_ : str = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
        self.assertTrue(hasattr(lowercase_ , """size""" ) )
        self.assertTrue(hasattr(lowercase_ , """apply_ocr""" ) )

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # ``from_dict`` keeps the default size and honours an integer override.
        lowercase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        lowercase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Placeholder overriding a mixin test that does not apply here.
        pass

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # PIL input: single image and batch both resize to (C, H, W) from config,
        # and OCR output (words/boxes) accompanies the pixel values.
        # Initialize image_processing
        lowercase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , Image.Image )
        # Test not batched input
        lowercase_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , lowercase_ )
        self.assertIsInstance(encoding.boxes , lowercase_ )
        # Test batched
        lowercase_ : Dict = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Same shape checks with numpy-array inputs.
        # Initialize image_processing
        lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , np.ndarray )
        # Test not batched input
        lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        lowercase_ : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Same shape checks with PyTorch-tensor inputs.
        # Initialize image_processing
        lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , torch.Tensor )
        # Test not batched input
        lowercase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        lowercase_ : Optional[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Integration test: push a real DocVQA fixture through the default
        # processor (OCR on), compare against words/boxes captured from
        # Tesseract 4.1.1, then re-run with OCR disabled.
        # with apply_OCR = True
        lowercase_ : Tuple = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowercase_ : Any = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
        lowercase_ : Tuple = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
        lowercase_ : Union[str, Any] = image_processing(lowercase_ , return_tensors="""pt""" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowercase_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
        """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]]  # noqa: E231
        lowercase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 
        436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , lowercase_ )
        self.assertListEqual(encoding.boxes , lowercase_ )
        # with apply_OCR = False
        lowercase_ : List[Any] = LayoutLMvaImageProcessor(apply_ocr=lowercase_ )
        lowercase_ : Any = image_processing(lowercase_ , return_tensors="""pt""" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 30
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable-Diffusion inference demo optimized with Intel Extension for PyTorch.
# NOTE(review): automated renaming collapsed every module-level binding to
# ``_lowercase``, so the names read later (``parser``, ``args``, ``device``,
# ``prompt``, ``model_id``, ``pipe``, ``sample``, ``timestep``,
# ``encoder_hidden_status``, ``input_example``, ``seed``, ``generator``,
# ``generate_kwargs``, ``image``) are never bound, and ``torch.bfloataa`` is
# presumably ``torch.bfloat16`` — restore these names before running.
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    # Optionally swap in the faster DPM-Solver++ multistep scheduler.
    _lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    _lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
# Dummy (sample, timestep, hidden-states) tracing inputs for UNet optimization.
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
    _lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    # Fall back to optimizing without an example input.
    _lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    _lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
    _lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    _lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
# Type aliases for the three rotor positions / rotor selections.
# NOTE(review): every binding below was renamed to ``_lowercase`` by automated
# obfuscation, so each assignment overwrites the previous one and the names the
# functions read (``abc``, ``reflector``, the rotor constants) are never bound
# — restore the original constant names before using this module.
_lowercase : Optional[Any] = tuple[int, int, int]
_lowercase : List[Any] = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_lowercase : List[str] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
_lowercase : Optional[int] = "EGZWVONAHDCLFQMSIPJBYUKXTR"
_lowercase : str = "FOBHMDKEXQNRAULPGSJVTYICZW"
_lowercase : Dict = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
# Symmetric letter-pair mapping: applying it twice yields the original letter,
# which is what makes encryption and decryption the same operation.
_lowercase : str = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}
# -------------------------- extra rotors --------------------------
_lowercase : List[str] = "RMDJXFUWGISLHVTCQNKYPBEZOA"
_lowercase : Any = "SGLCPQWZHKXAREONTFBVIYJUDM"
_lowercase : List[str] = "HVSICLTYKQUBXDWAJZOMFGPREN"
_lowercase : str = "RZWQHFMVDBKICJLNTUXAGYPSOE"
_lowercase : Optional[Any] = "LFKIJODBEGAMQPXVUHYSTCZRWN"
_lowercase : Union[str, Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def lowerCamelCase ( rotpos: RotorPositionT , rotsel: RotorSelectionT , pbstring: str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate an Enigma configuration and build the plugboard mapping.

    Bug fixes vs. the checked-in version:
    * the three parameters all shared one duplicate name (a ``SyntaxError``);
    * the unpacked rotor positions were never bound to the names the range
      checks read;
    * the first range-check message was missing its closing parenthesis.

    :param rotpos: three 1-based rotor start positions (each in 1..26)
    :param rotsel: three distinct rotor wiring strings
    :param pbstring: plugboard pair string, validated/expanded by ``_plugboard``
    :return: ``(rotpos, rotsel, plugboard_dict)``
    :raises Exception: if fewer than 3 unique rotors are supplied
    :raises ValueError: if any rotor position is outside 1..26
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel ) )) < 3:
        msg = f'Please use 3 unique rotors (not {unique_rotsel})'
        raise Exception(msg )
    # Checks if rotor positions are valid (1..26 — one slot per alphabet letter)
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= 26:
        msg = f'First rotor position is not within range of 1..26 ({rotorpos1})'
        raise ValueError(msg )
    if not 0 < rotorpos2 <= 26:
        msg = f'Second rotor position is not within range of 1..26 ({rotorpos2})'
        raise ValueError(msg )
    if not 0 < rotorpos3 <= 26:
        msg = f'Third rotor position is not within range of 1..26 ({rotorpos3})'
        raise ValueError(msg )
    # Validates string and returns dict
    pbdict = _plugboard(pbstring )
    return rotpos, rotsel, pbdict
def lowerCamelCase ( pbstring: str ) -> dict[str, str]:
    """Validate a plugboard-pair string and return its symmetric mapping.

    e.g. ``"POLAND"`` -> ``{'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'}``

    Bug fixes vs. the checked-in version:
    * the parameter was renamed away from the ``pbstring`` the body reads;
    * ``pbstring.replace(" ", "")`` discarded its result (``str.replace``
      returns a new string), so spaces were never stripped — the result is now
      assigned back, and the strip happens before the even-length check so
      that inputs like ``"AB CD"`` validate correctly.

    :raises TypeError: if ``pbstring`` is not a string
    :raises Exception: on odd length, non-alphabet symbols, or duplicates
    """
    # The machine's alphabet (the module-level constant was lost to renaming).
    abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring , str ):
        msg = f'Plugboard setting isn\'t type string ({type(pbstring )})'
        raise TypeError(msg )
    pbstring = pbstring.replace(""" """ , """""" )
    if len(pbstring ) % 2 != 0:
        msg = f'Odd number of symbols ({len(pbstring )})'
        raise Exception(msg )
    if pbstring == "":
        return {}
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f'\'{i}\' not in list of symbols'
            raise Exception(msg )
        if i in tmppbl:
            msg = f'Duplicate symbol ({i})'
            raise Exception(msg )
        tmppbl.add(i )
    del tmppbl
    # Created the dictionary — each pair maps in both directions.
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : RotorPositionT , UpperCAmelCase__ : RotorSelectionT = (rotora, rotora, rotora) , UpperCAmelCase__ : str = "" , ) -> str:
    """Encipher/decipher ``text`` on an Enigma-style rotor machine (the
    operation is symmetric: running the output back through with the same
    settings recovers the input).

    NOTE(review): this function is corrupted by automated renaming and cannot
    run as-is — the four parameters share one duplicate name (``SyntaxError``),
    the three rotor positions and three rotors were each collapsed to a single
    name (``rotorposa`` / ``rotora``), and ``text``/``plugb``/``abc``/
    ``reflector``/``plugboard``/``result``/``index``/``symbol`` targets are
    read but never bound. The comments below describe the apparent intent —
    confirm against the upstream implementation before restoring.
    """
    lowercase_ : Any = text.upper()
    lowercase_ , lowercase_ , lowercase_ : Any = _validator(
        UpperCAmelCase__ , UpperCAmelCase__ , plugb.upper() )
    lowercase_ , lowercase_ , lowercase_ : List[Any] = rotor_position
    lowercase_ , lowercase_ , lowercase_ : Optional[int] = rotor_selection
    # Convert 1-based rotor positions to 0-based offsets.
    rotorposa -= 1
    rotorposa -= 1
    rotorposa -= 1
    lowercase_ : List[str] = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                lowercase_ : Dict = plugboard[symbol]
            # rotor ra --------------------------
            lowercase_ : Optional[int] = abc.index(UpperCAmelCase__ ) + rotorposa
            lowercase_ : Dict = rotora[index % len(UpperCAmelCase__ )]
            # rotor rb --------------------------
            lowercase_ : str = abc.index(UpperCAmelCase__ ) + rotorposa
            lowercase_ : List[Any] = rotora[index % len(UpperCAmelCase__ )]
            # rotor rc --------------------------
            lowercase_ : Dict = abc.index(UpperCAmelCase__ ) + rotorposa
            lowercase_ : List[Any] = rotora[index % len(UpperCAmelCase__ )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            lowercase_ : Union[str, Any] = reflector[symbol]
            # 2nd rotors (pass back through the rotors in reverse)
            lowercase_ : List[str] = abc[rotora.index(UpperCAmelCase__ ) - rotorposa]
            lowercase_ : List[str] = abc[rotora.index(UpperCAmelCase__ ) - rotorposa]
            lowercase_ : Union[str, Any] = abc[rotora.index(UpperCAmelCase__ ) - rotorposa]
            # 2nd plugboard
            if symbol in plugboard:
                lowercase_ : int = plugboard[symbol]
            # moves/resets rotor positions (wrap each position past the alphabet)
            rotorposa += 1
            if rotorposa >= len(UpperCAmelCase__ ):
                lowercase_ : int = 0
            rotorposa += 1
            if rotorposa >= len(UpperCAmelCase__ ):
                lowercase_ : Optional[Any] = 0
            rotorposa += 1
            if rotorposa >= len(UpperCAmelCase__ ):
                lowercase_ : List[Any] = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #       'Invalid symbol('+repr(symbol)+')')
        result.append(UpperCAmelCase__ )
    return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
    # Demo: encrypt, then decrypt with the same settings (Enigma is symmetric).
    # NOTE(review): ``rotora`` and ``enigma`` are not bound under these names
    # in this module — the demo cannot run until the renaming is reverted.
    _lowercase : List[Any] = "This is my Python script that emulates the Enigma machine from WWII."
    _lowercase : Tuple = (1, 1, 1)
    _lowercase : str = "pictures"
    _lowercase : Any = (rotora, rotora, rotora)
    _lowercase : Optional[int] = enigma(message, rotor_pos, rotor_sel, pb)
    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure for the SwiftFormer model (standard HF __init__ pattern):
# names are declared here and only materialized on first attribute access.
_lowercase : Optional[Any] = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
    # Modeling classes are only exported when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Any = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): the upstream pattern replaces this module in ``sys.modules``
    # (``sys.modules[__name__] = _LazyModule(...)``); here the result is bound
    # to ``_lowercase`` and ``_import_structure`` is never defined (the dict
    # above was renamed) — TODO restore both names.
    _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Default per-device batch sizes for the MRPC dataloaders below.
# NOTE(review): both constants were renamed to ``_lowercase`` by automated
# obfuscation, so the second binding overwrites the first — TODO restore the
# original names (presumably train and eval batch sizes).
_lowercase : Optional[Any] = 16
_lowercase : int = 32
def lowerCamelCase ( accelerator: Accelerator , batch_size: int = 16 , model_name_or_path: str = "bert-base-cased" ):
    """Build train/validation dataloaders for GLUE MRPC.

    Restores the parameter/local names destroyed by automated renaming: the
    checked-in version declared three parameters with one duplicate name (a
    ``SyntaxError``) and read ``tokenizer``/``datasets``/``tokenized_datasets``
    that were never bound.

    :param accelerator: ``Accelerator`` whose distributed type decides padding
    :param batch_size: per-device batch size for both splits
    :param model_name_or_path: tokenizer checkpoint to load
    :return: ``(train_dataloader, eval_dataloader)``
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset("""glue""" , """mrpc""" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )

    # Instantiate dataloaders.
    # NOTE(review): the renamed original passed one placeholder for both batch
    # sizes and shuffle flags; shuffle=True/False follows the upstream
    # Accelerate example — confirm whether eval should use a separate size.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def lowerCamelCase ( accelerator , model , eval_dataloader , metric ):
    """Run one evaluation pass over ``eval_dataloader`` and return accuracy.

    Restores the parameter/local names destroyed by automated renaming (the
    checked-in version had four parameters sharing one duplicate name — a
    ``SyntaxError`` — and read ``samples_seen``/``outputs``/``predictions``/
    ``references``/``eval_metric`` that were never bound). The bogus typing
    annotations (``List[Any]``, ``Dict`` — names not imported by this file)
    are dropped.

    :param accelerator: ``Accelerator`` used for device placement and gathering
    :param model: the (prepared) classification model
    :param eval_dataloader: validation dataloader
    :param metric: an ``evaluate`` metric with ``add_batch``/``compute``
    :return: the computed ``"accuracy"`` value
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                # Drop the duplicated samples padded onto the final batch.
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict ) -> List[str]:
    """Train BERT on GLUE MRPC with Accelerate, saving + validating a
    checkpoint every epoch (and verifying resumed state against the saved
    per-epoch JSON when ``--resume_from_checkpoint`` is given).

    NOTE(review): corrupted by automated renaming — the two parameters share
    one duplicate name (a ``SyntaxError``; presumably ``config``/``args``),
    and assignment targets became ``lowercase_`` while later statements read
    the original names (``lr``, ``num_epochs``, ``seed``, ``batch_size``,
    ``model``, ``optimizer_cls``, ``gradient_accumulation_steps``,
    ``lr_scheduler``, ``epoch_string``, ``state_epoch_num``,
    ``starting_epoch``, ``accuracy``, ``resumed_state``, ``outputs``, ``loss``,
    ``overall_step``, ``state``...). Restore the original bindings before
    running.
    """
    # Initialize accelerator
    lowercase_ : Any = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lowercase_ : List[Any] = config["""lr"""]
    lowercase_ : int = int(config["""num_epochs"""] )
    lowercase_ : Dict = int(config["""seed"""] )
    lowercase_ : Optional[Any] = int(config["""batch_size"""] )
    lowercase_ : Tuple = args.model_name_or_path
    set_seed(UpperCAmelCase__ )
    lowercase_ , lowercase_ : Tuple = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    lowercase_ : int = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
    # Instantiate optimizer
    # Use the stub DummyOptim when DeepSpeed supplies the optimizer via its config.
    lowercase_ : Optional[int] = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    lowercase_ : str = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ )
    if accelerator.state.deepspeed_plugin is not None:
        lowercase_ : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        lowercase_ : List[Any] = 1
    lowercase_ : List[str] = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lowercase_ : Tuple = get_linear_schedule_with_warmup(
            optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , )
    else:
        lowercase_ : str = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = accelerator.prepare(
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    # We need to keep track of how many total steps we have iterated over
    lowercase_ : Any = 0
    # We also need to keep track of the stating epoch so files are named properly
    lowercase_ : Optional[Any] = 0
    lowercase_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
    lowercase_ : Optional[Any] = num_epochs
    if args.partial_train_epoch is not None:
        lowercase_ : List[str] = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        lowercase_ : Optional[int] = args.resume_from_checkpoint.split("""epoch_""" )[1]
        lowercase_ : str = """"""
        # Parse the leading digits of the checkpoint directory suffix.
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        lowercase_ : Union[str, Any] = int(UpperCAmelCase__ ) + 1
        lowercase_ : Optional[Any] = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        accelerator.print("""resumed checkpoint performance:""" , UpperCAmelCase__ )
        accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
        accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
        # Cross-check the restored state against the JSON written at save time.
        with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , """r""" ) as f:
            lowercase_ : Any = json.load(UpperCAmelCase__ )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    lowercase_ : Optional[int] = {}
    for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ):
        model.train()
        for step, batch in enumerate(UpperCAmelCase__ ):
            lowercase_ : Union[str, Any] = model(**UpperCAmelCase__ )
            lowercase_ : Dict = outputs.loss
            lowercase_ : List[str] = loss / gradient_accumulation_steps
            accelerator.backward(UpperCAmelCase__ )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        # Save a checkpoint plus a JSON snapshot of metrics/LR for this epoch.
        lowercase_ : List[str] = F'''epoch_{epoch}'''
        lowercase_ : Dict = os.path.join(args.output_dir , UpperCAmelCase__ )
        accelerator.save_state(UpperCAmelCase__ )
        lowercase_ : Dict = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        lowercase_ : Optional[int] = accuracy
        lowercase_ : List[str] = lr_scheduler.get_lr()[0]
        lowercase_ : List[str] = optimizer.param_groups[0]["""lr"""]
        lowercase_ : List[str] = epoch
        lowercase_ : List[str] = overall_step
        accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , """w""" ) as f:
                json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( ) -> str:
    """CLI entry point: parse arguments and launch MRPC training.

    NOTE(review): corrupted by automated renaming — ``parser``/``args``/
    ``config`` are read but only ``lowercase_`` is bound, and
    ``training_function`` / ``UpperCAmelCase__`` are not defined under those
    names in this module. Restore the original bindings before running.
    """
    lowercase_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=UpperCAmelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=UpperCAmelCase__ , )
    parser.add_argument(
        """--output_dir""" , type=UpperCAmelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--resume_from_checkpoint""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help="""If the training should continue from a checkpoint folder.""" , )
    parser.add_argument(
        """--partial_train_epoch""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help="""If passed, the training will stop after this number of epochs.""" , )
    parser.add_argument(
        """--num_epochs""" , type=UpperCAmelCase__ , default=2 , help="""Number of train epochs.""" , )
    lowercase_ : Dict = parser.parse_args()
    lowercase_ : Tuple = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this module after the renaming.
    main()
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray:
lowercase_ : List[Any] = np.shape(UpperCAmelCase__ )
lowercase_ : Dict = np.shape(UpperCAmelCase__ )
lowercase_ : int = np.shape(UpperCAmelCase__ )
if shape_a[0] != shape_b[0]:
lowercase_ : Optional[int] = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(UpperCAmelCase__ )
if shape_b[1] != shape_c[1]:
lowercase_ : Optional[Any] = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(UpperCAmelCase__ )
lowercase_ : Any = pseudo_inv
if a_inv is None:
try:
lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__(unittest.TestCase):
    """Tests for the Schur-complement helper.

    NOTE(review): the helper is defined as ``schur_complement`` upstream; verify
    the module-level binding matches before running this suite.
    """

    def test_schur_complement_determinant(self):
        """det([[A, B], [B^T, C]]) == det(A) * det(S), S the Schur complement of A."""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        block_matrix = np.block([[a, b], [b.T, c]])
        det_block = np.linalg.det(block_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_block, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        """B must have as many rows as A."""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        """C must have as many columns as B."""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
    # Run the module doctests, then the unittest suite defined above.
    import doctest
    doctest.testmod()
    unittest.main()
| 30
| 1
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch) -> None:
    """Check ``is_small_dataset`` against the configurable in-memory size cap.

    ``monkeypatch`` overrides ``datasets.config.IN_MEMORY_MAX_SIZE`` (unless the
    "default" sentinel is used), then the expected boolean is derived the same
    way the library is documented to compute it.
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset counts as "small" only when both sizes are truthy and the
    # dataset fits under the cap.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encode a bytes object to Base64 (pure-Python reference implementation).

    Args:
        data: raw bytes to encode.

    Returns:
        The Base64 encoding of ``data``, including "=" padding.

    Raises:
        TypeError: if ``data`` is not ``bytes``.
    """
    # BUG FIX: the original checked ``isinstance(data, data)``, which always
    # raises TypeError; compare against the intended type instead.
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    # Local copy of the RFC 4648 alphabet so the function is self-contained.
    b64_charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The "=" padding that will be appended to the output.
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Pad the bit stream with zeros so its length is a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Map every 6-bit group onto its Base64 character.
    return (
        "".join(
            b64_charset[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decode a Base64 string (or ASCII bytes) back to raw bytes.

    Args:
        encoded_data: canonical Base64 text, as ``str`` or ``bytes``.

    Returns:
        The decoded bytes.

    Raises:
        TypeError: if ``encoded_data`` is neither ``str`` nor ``bytes``.
        ValueError: if a bytes input is not valid ASCII/UTF-8 text.
        AssertionError: on non-Base64 characters or incorrect padding (kept as
            ``assert`` to preserve the original error contract).
    """
    # Local copy of the RFC 4648 alphabet so the function is self-contained.
    b64_charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    # BUG FIX: the original isinstance checks compared the argument against
    # itself; check against the intended types instead.
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we can convert it to a string object.
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check that the encoded string contains only Base64 characters.
    if padding:
        assert all(
            char in b64_charset for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in b64_charset for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding.
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Strip the "=" padding and drop the zero bits it stood for.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded)
if __name__ == "__main__":
    # Run the doctests of the Base64 helpers above.
    import doctest
    doctest.testmod()
| 30
| 1
|
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count n-digit positive integers that are also n-th powers.

    Args:
        max_base: exclusive upper bound on the bases tried (1 .. max_base - 1).
        max_power: exclusive upper bound on the exponents tried (1 .. max_power - 1).

    Returns:
        The number of pairs (base, power) with ``len(str(base**power)) == power``.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
    # The "=" inside the f-string prints both the call expression and its value.
    print(f"""{solution(10, 22) = }""")
| 30
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def update_custom_js(version: str, custom_js_file=None) -> None:
    """Update the doc site's custom.js for a new release ``version``.

    Rewrites the ``stableVersion`` constant and appends a ``"vX": "vX"`` entry
    to the ``versionMapping`` dictionary, in place.

    Args:
        version: release version without the leading "v" (e.g. "4.1.0").
        custom_js_file: path of the JS file to edit; defaults to the
            module-level path constant.
    """
    if custom_js_file is None:
        custom_js_file = _lowercase  # module-level default path constant
    with open(custom_js_file, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end (appended to the last entry line).
    lines[index - 1] += f' "v{version}": "v{version}",\n'
    with open(custom_js_file, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    # BUG FIX: the parser and parsed args were bound to throwaway names while
    # ``parser``/``args`` were read, raising NameError at runtime.
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 30
| 1
|
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    # Flag consumed below and by the sentence-splitting helper.
    # BUG FIX: the flag was assigned to a throwaway name while NLTK_AVAILABLE
    # was read, raising NameError at import time.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
    # Download the sentence tokenizer once, guarded by a file lock so that
    # parallel workers don't race on the download.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def lowerCamelCase(UpperCAmelCase__: str) -> str:
    """Split the input text into one sentence per line using NLTK.

    The Pegasus newline marker ``<n>`` is stripped first.  Requires nltk.
    """
    # BUG FIX: re.sub returns a new string; the original discarded the result.
    UpperCAmelCase__ = re.sub("<n>", "", UpperCAmelCase__)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(UpperCAmelCase__))
| 30
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """Builds tiny Bit configs/inputs and runs shape checks for the tests below.

    NOTE(review): reconstructed from the upstream transformers Bit test suite;
    the obfuscated original had all ``__init__`` parameters sharing one name
    (a SyntaxError) and never set any ``self.*`` attribute.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        # NOTE: the mutable defaults are only ever read, never mutated.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a BitConfig mirroring this tester's hyper-parameters."""
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward through BitModel and verify the final feature-map shape."""
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward through the classification head and verify the logits shape."""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Verify feature maps/channels of BitBackbone, with and without out_features."""
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common test mixins."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for Bit.

    NOTE(review): reconstructed from the upstream transformers Bit test suite;
    the obfuscated original bound every class attribute and method to a single
    shared name, so all but the last definition were silently shadowed.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # Norm layers must start from identity weight and zero bias.
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> Optional[Any]:
    """Load the COCO test-fixture image used by the integration tests below.

    BUG FIX: the opened image was bound to a throwaway name while the
    undefined name ``image`` was returned (NameError).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __magic_name__(unittest.TestCase):
    """Slow integration test: run a pretrained Bit classifier on a fixture image.

    NOTE(review): reconstructed from the upstream transformers Bit test suite;
    the obfuscated original shadowed method names and read undefined locals.
    """

    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class __magic_name__(BackboneTesterMixin, unittest.TestCase):
    """Backbone-specific checks for Bit, driven by BackboneTesterMixin."""

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 30
| 1
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : bool , ) -> tuple[float | int, list[tuple[int, int]]]:
lowercase_ , lowercase_ : Optional[Any] = grid.shape
lowercase_ : str = [-1, 1, 0, 0]
lowercase_ : int = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase_ , lowercase_ : Dict = [(0, source)], set()
lowercase_ : Optional[Any] = np.full((rows, cols) , np.inf )
lowercase_ : Optional[Any] = 0
lowercase_ : Optional[int] = np.empty((rows, cols) , dtype=UpperCAmelCase__ )
lowercase_ : Dict = None
while queue:
((lowercase_) , (lowercase_)) : List[Any] = heappop(UpperCAmelCase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase_ : Any = []
while (x, y) != source:
path.append((x, y) )
lowercase_ , lowercase_ : int = predecessors[x, y]
path.append(UpperCAmelCase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCAmelCase__ ) ):
lowercase_ , lowercase_ : Union[str, Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase_ : Optional[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCAmelCase__ , (dist + 1, (nx, ny)) )
lowercase_ : Any = dist + 1
lowercase_ : Union[str, Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
    # Run the module doctests.
    import doctest
    doctest.testmod()
| 30
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Model predictions used as ROUGE hypotheses in the tests below.
# BUG FIX: both lists were bound to the same name, so the second silently
# clobbered the first; restore the distinct PRED/TGT names the tests use.
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

# Reference summaries the predictions are scored against.
TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]
def test_disaggregated_scores_are_determinstic() -> None:
    """Per-key disaggregated stats must not depend on which other keys were requested."""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement() -> None:
    """Newline-separated sentences should improve rougeLsum on CNN-style summaries."""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics() -> None:
    """newline_sep must not change rouge1/rouge2/rougeL."""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
    """For single-sentence inputs the newline_sep flag must be a no-op."""
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline() -> None:
    """Default newline handling must score Pegasus-style ``<n>`` output higher."""
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli() -> None:
    """The path-based CLI entry point should return a dict (or defaultdict when disaggregated)."""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 30
| 1
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download ``num_class_images`` regularization images matching ``class_prompt``.

    Queries the LAION KNN service via ``ClipClient``, growing the requested
    candidate pool until enough results are available, then downloads images
    into ``{class_data_dir}/images`` and records captions/URLs/paths alongside.

    NOTE(review): the obfuscated original declared three parameters with the
    same name (a SyntaxError) and shared its def name with ``parse_args``;
    names restored to match the call site in the ``__main__`` guard.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Nothing to do if enough images were already downloaded previously.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the candidate pool until the query returns enough hits (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Open the payload once to validate it is a decodable image.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa2.write(images["url"] + "\n")
                    fa3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip broken URLs/payloads and keep going.
                continue
    return
def parse_args():
    """Parse the CLI arguments for the class-image retrieval script.

    NOTE(review): the obfuscated original bound the parser to a throwaway name
    while reading ``parser`` (NameError); def name restored to match the
    ``__main__`` guard's call.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # BUG FIX: the parsed args were bound to a throwaway name while ``args``
    # was read, raising NameError at runtime.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# BUG FIX: both module constants were bound to the same name, so the config
# map silently clobbered the logger; restore distinct names.
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__(PretrainedConfig):
    """Configuration for Speech2Text encoder-decoder models.

    NOTE(review): reconstructed from the upstream transformers
    ``Speech2TextConfig``; the obfuscated original declared every ``__init__``
    parameter with one shared name (a SyntaxError), never set any ``self.*``
    attribute, and bound all three class attributes to one name.
    """

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 30
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : int = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the NAT (Neighborhood Attention Transformer) backbone.

    The obfuscated original used the undefined base name ``_UpperCAmelCase``
    (the file imports PretrainedConfig and BackboneConfigMixin above) and
    assigned both class attributes to one name so the second clobbered the
    first; restored to the conventional ``model_type`` / ``attribute_map``.
    """

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 30
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """Builds small TimeSformer configs/inputs and runs basic shape checks.

    The class name is grounded by its use in ``setUp`` below
    (``TimesformerModelTester(self)``); the obfuscated original named it
    ``__magic_name__`` and gave every parameter the same (illegal) name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny video batch."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model-test suite for TimeSformer.

    In the obfuscated original every method was named ``SCREAMING_SNAKE_CASE_``,
    so later definitions clobbered earlier ones and unittest discovered no
    ``test_*`` methods at all; names are restored here.
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Deep-copy so the shared inputs_dict is never mutated across classes.
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video() -> list:
    """Download the hf-internal-testing spaghetti sample video and return it
    as a list of frames.

    Named ``prepare_video`` because that is how it is invoked in the
    integration test below; the obfuscated original also passed the
    undefined name ``UpperCAmelCase__`` to ``np.load``.
    """
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of a pretrained Kinetics-400 checkpoint.

    In the obfuscated original both members shared one method name, so the
    ``self.default_image_processor`` read below referenced a name that was
    never defined; the cached property is restored accordingly.
    """

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.30_16, -0.77_13, -0.42_05]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 30
| 1
|
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    """A tiny from-scratch convolutional network: one convolution layer, one
    pooling layer and a two-layer fully connected back-propagation
    classifier, implemented with numpy only.

    The class name is grounded by the ``CNN(...)`` call in ``read_model``;
    the obfuscated original had duplicate parameter names (a SyntaxError),
    called the undefined ``self.Expand_Mat`` and lost the slice-assignment
    target in ``_calculate_gradient_from_pool``.
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, number_of_kernels, conv_step]
        :param size_p1: pooling window size
        :param bp_num1: units in the BP input layer
        :param bp_num2: units in the BP hidden layer
        :param bp_num3: units in the BP output layer
        :param rate_w: weight learning rate
        :param rate_t: threshold learning rate
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # Weights start in (-0.5, 0.5); thresholds in (-1, 1).
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # overwrite the random initialization with the saved parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Logistic sigmoid activation."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round a prediction to three decimals."""
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Convolve *data* with every kernel in *w_convs*.

        Returns (flattened_focus_windows, list_of_feature_maps).
        """
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Down-sample every feature map by average or max pooling."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Up-sample the pooled gradient back onto each feature map and apply
        the sigmoid derivative."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    # broadcast the pooled gradient over the pooling window
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        """Train until *n_repeat* epochs or the MSE drops below
        *error_accuracy*; returns the final MSE."""
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the forward pass on a batch and return rounded outputs."""
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
    # No demo wired up: the CNN class above is intended to be imported and
    # driven by external training code.
    pass
| 30
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, padded so odd kernels keep the
    spatial size (``nn.Convad``/``nn.BatchNormad`` in the obfuscated original
    do not exist in ``torch.nn``)."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # bias is omitted because the batch norm that follows has its own affine shift
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """Stem of the network: a single strided conv that embeds pixel values."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        # Fail fast on a channel mismatch instead of a cryptic conv error.
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """1x1 strided convolution + batch norm, used to project residual inputs
    to the right shape when channels or stride change."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-excite: globally pool, compute per-channel attention with a
    two-conv bottleneck, and rescale the input channels."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """ResNeXt-style bottleneck: 1x1 reduce, grouped 3x3, 1x1 expand, plus a
    projected residual when the shape changes."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # no activation on the last conv: it is applied after the residual add
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet Y bottleneck: an X layer with a squeeze-and-excite block
    inserted before the final 1x1 convolution."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            # no activation here: it is applied after the residual add
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__(_UpperCAmelCase):
    """Base pretrained-model class for RegNet: weight init and gradient checkpointing.

    NOTE(review): the original bound four class attributes to one name
    (``UpperCamelCase__``) and both methods to one name, so all but the last were
    shadowed; restored to the standard PreTrainedModel attribute/method names.
    """

    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Kaiming init for convolutions, constant init for norm layers."""
        if isinstance(module, nn.Conv2d):  # was `nn.Convad` — AttributeError
            nn.init.kaiming_normal_(module.weight, mode="""fan_out""", nonlinearity="""relu""")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):  # was `nn.BatchNormad`
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): the encoder class is assumed to be named RegNetEncoder
        # elsewhere in this file — confirm against the actual class name.
        if isinstance(module, RegNetEncoder):
            module.gradient_checkpointing = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__(_UpperCAmelCase):
    """Bare RegNet backbone: embedder -> encoder -> adaptive average pooling.

    NOTE(review): restored from a broken original (`nn.AdaptiveAvgPoolad` does not
    exist; the forward signature repeated one parameter name — a SyntaxError).
    """

    def __init__(self, lowercase_):
        super().__init__(lowercase_)
        self.config = lowercase_
        self.embedder = RegNetEmbeddings(lowercase_)
        self.encoder = RegNetEncoder(lowercase_)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))  # was `nn.AdaptiveAvgPoolad`
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_lowercase)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="""vision""",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def SCREAMING_SNAKE_CASE_(self, pixel_values, output_hidden_states=None, return_dict=None):
        """Encode images; returns (last_hidden_state, pooled, *extras) or a ModelOutput."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__(_UpperCAmelCase):
    """RegNet with a linear image-classification head.

    NOTE(review): restored from a broken original (forward repeated one parameter
    name four times — a SyntaxError — and read many undefined locals).
    """

    def __init__(self, lowercase_):
        super().__init__(lowercase_)
        self.num_labels = lowercase_.num_labels
        self.regnet = RegNetModel(lowercase_)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(lowercase_.hidden_sizes[-1], lowercase_.num_labels)
            if lowercase_.num_labels > 0
            else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_lowercase)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def SCREAMING_SNAKE_CASE_(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        """Classify images; computes the appropriate loss when `labels` is given."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 30
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __magic_name__(_UpperCAmelCase):
    """Tool that answers an English question about an image (ViLT VQA checkpoint).

    NOTE(review): the original bound all seven class attributes to one name and all
    three methods to one name, so only the last of each survived; restored to the
    PipelineTool attribute/method API.
    """

    default_checkpoint = '''dandelin/vilt-b32-finetuned-vqa'''
    description = (
        '''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
        '''image containing the information, as well as a `question` which should be the question in English. It '''
        '''returns a text that is the answer to the question.'''
    )
    name = '''image_qa'''
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ['''image''', '''text''']
    outputs = ['''text''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Pack image + question into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors="""pt""")

    def forward(self, inputs):
        """Run the VQA model without tracking gradients."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the argmax logit to its label string."""
        idx = outputs.argmax(-1).item()
        # was `self.model.config.idalabel[idx]` with `idx` undefined
        return self.model.config.id2label[idx]
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding (standard transformers pattern).
# Fixes the original, which never defined `_import_structure` (NameError at
# import time) and discarded the `_LazyModule` instead of installing it.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attributes import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __magic_name__(unittest.TestCase):
    """CPU-only smoke tests running the accelerate test scripts via debug_launcher.

    NOTE(review): the original named both methods identically, so the first was
    shadowed and never executed; restored distinct test names.
    """

    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 30
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """Check is_small_dataset against the configured IN_MEMORY_MAX_SIZE.

    NOTE(review): restored from a broken original whose three parameters shared one
    name (a SyntaxError) and which could not be collected by pytest (no ``test_``
    prefix, parameters not matching the parametrize names).
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, """IN_MEMORY_MAX_SIZE""", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # "Small" means a known size that fits below a non-zero limit.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 30
| 1
|
'''simple docstring'''
from functools import reduce
# The 1000-digit number from Project Euler problem 8.
_lowercase = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def lowerCamelCase(UpperCAmelCase__: str = _lowercase) -> int:
    """Return the greatest product of 13 adjacent digits in the digit string.

    Fixes the original: the default referenced an undefined name ``N`` and the
    reduce lambda repeated one parameter name (a SyntaxError).
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda a, b: str(int(a) * int(b)), UpperCAmelCase__[i : i + 13]))
        for i in range(len(UpperCAmelCase__) - 12)
    )


if __name__ == "__main__":
    # was `print(f"{solution() = }")` — `solution` was never defined
    print(f"""{lowerCamelCase() = }""")
| 30
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__(unittest.TestCase):
    @slow
    def SCREAMING_SNAKE_CASE_(self):
        """Check the small MT5 checkpoint's mean LM loss against a reference value.

        Fixes the original, which read the undefined names ``mtf_score`` and
        ``EXPECTED_SCORE`` (NameError at runtime).
        """
        model = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""")
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""")
        input_ids = tokenizer("""Hello there""", return_tensors="""tf""").input_ids
        labels = tokenizer("""Hi I am""", return_tensors="""tf""").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.22_81_68
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
| 30
| 1
|
'''simple docstring'''
def lowerCamelCase(text1: str, text2: str) -> str:
    """Return the longest common substring of *text1* and *text2*.

    Standard O(len1*len2) dynamic program; ``dp[i][j]`` is the length of the
    common suffix of ``text1[:i]`` and ``text2[:j]``.  Fixes the original, whose
    two parameters shared one name (a SyntaxError) and which read the undefined
    names ``ans_length`` / ``ans_index``.

    Raises:
        ValueError: if either argument is not a string.
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("""longest_common_substring() takes two strings for inputs""")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0  # end position (exclusive) of the best match in text1
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 30
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    """Solve y' = ode_func(x, y) on [x0, x_end] with Heun's (modified Euler) method.

    Fixes the original: all five parameters shared one name (a SyntaxError) and the
    corrected value was never stored back into ``y[k + 1]``.

    Returns the array of y values at each step, with ``y[0] == y0``.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Explicit-Euler predictor.
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # Trapezoidal corrector averages the slopes at both ends of the step.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 30
| 1
|
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 if neither input is 0, else 0 (logical AND).

    Restored from a broken original: both parameters shared one name (a
    SyntaxError), both functions were named ``lowerCamelCase``, and the
    ``__main__`` block called the undefined names ``and_gate``/``test_and_gate``.
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the 2-input truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 30
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad (RBJ Audio EQ Cookbook coefficients).

    NOTE(review): restored from a broken original whose parameters shared one name
    (a SyntaxError) and whose coefficient names were collapsed to `aa`/`ba`;
    renamed from the colliding `lowerCamelCase` to its canonical name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad (RBJ Audio EQ Cookbook coefficients).

    NOTE(review): restored from a broken original (duplicate parameter names,
    collapsed coefficient names); renamed to its canonical name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad (RBJ Audio EQ Cookbook coefficients).

    NOTE(review): restored from a broken original (duplicate parameter names,
    collapsed coefficient names); renamed to its canonical name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad (RBJ Audio EQ Cookbook coefficients).

    The numerator is the reversed denominator, which preserves magnitude and only
    shifts phase.  NOTE(review): restored from a broken original; renamed to its
    canonical name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking-EQ biquad boosting/cutting `gain_db` at `frequency`.

    NOTE(review): restored from a broken original (duplicate parameter names,
    collapsed coefficient names); renamed to its canonical name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad applying `gain_db` below `frequency`.

    NOTE(review): restored from a broken original (duplicate parameter names,
    collapsed coefficient names); renamed to its canonical name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad applying `gain_db` above `frequency`.

    NOTE(review): restored from a broken original (duplicate parameter names,
    collapsed coefficient names); renamed to its canonical name.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 30
| 1
|
'''simple docstring'''
def remove_duplicates(key: str) -> str:
    """Return *key* with repeated alphabetic characters removed (spaces kept).

    Renamed from `lowerCamelCase` to match the existing call site in
    `create_cipher_map`; the original also read the undefined names
    ``key`` / ``key_no_dups``.
    """
    key_no_dups = """"""
    for ch in key:
        # Precedence: spaces always pass; letters pass only on first occurrence.
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    """Build a keyword-cipher substitution map A-Z -> cipher alphabet.

    The deduplicated keyword fills the first positions; the remaining letters of
    the alphabet follow, skipping letters already used by the keyword.  Renamed
    from `lowerCamelCase` to match the call in `main`; the original read many
    undefined names (``key``, ``offset``, ``char``, ``cipher_alphabet``).
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher *message* with *cipher_map*; unmapped characters pass through.

    Renamed from `lowerCamelCase` to match the dispatch table in `main`; the
    original's two parameters shared one name (a SyntaxError).
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher *message* by inverting *cipher_map*; unmapped characters pass through.

    Renamed from `lowerCamelCase` to match the dispatch table in `main`; the
    original's two parameters shared one name (a SyntaxError).
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    """Interactive entry point: prompt for a message, keyword and mode, then print.

    Renamed from `lowerCamelCase` to match the existing call in the
    ``__main__`` guard; the original read undefined names (``option``, ``func``,
    ``message``, ``key``, ``cipher_map``).
    """
    message = input("""Enter message to encode or decode: """).strip()
    key = input("""Enter keyword: """).strip()
    option = input("""Encipher or decipher? E/D:""").strip()[0].lower()
    try:
        func = {"""e""": encipher, """d""": decipher}[option]
    except KeyError:
        raise KeyError("""invalid input option""")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 30
|
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__(datasets.BuilderConfig):
    """Builder config for Spark-backed datasets.

    NOTE(review): the original field was named ``UpperCamelCase__``, but the
    builder reads ``self.config.features`` (see `_info`/`_prepare_split_single`),
    so the field must be named ``features``.
    """

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    """Return a generator function yielding (key, row-dict) pairs partition by partition.

    Renamed from `lowerCamelCase` to match the existing call in
    `SparkExamplesIterable.__init__`; the original's two parameters shared one
    name (a SyntaxError).
    """
    import pyspark

    def generate_fn():
        # Tag each row with its physical partition id so partitions can be
        # fetched in the requested order.
        df_with_partition_id = df.select("""*""", pyspark.sql.functions.spark_partition_id().alias("""part_id"""))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("""*""").where(f'''part_id = {partition_id}''').drop("""part_id""")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                # Keys are unique across partitions: "<partition>_<row>".
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn
class __magic_name__(_BaseExamplesIterable):
    """Examples iterable over a Spark DataFrame, one partition at a time.

    NOTE(review): the original bound three methods (and a property) to the single
    name ``SCREAMING_SNAKE_CASE_``, so only the last survived; restored to the
    `_BaseExamplesIterable` method names — confirm against the installed
    `datasets` version.
    """

    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator):
        """Return a copy that visits partitions in a shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int):
        """Return a copy restricted to this worker's share of the partitions."""
        # NOTE(review): `split_shard_indices_by_worker` is not defined in this
        # class — it must come from the base class or elsewhere; verify.
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        # One shard per partition in the visiting order.
        return len(self.partition_order)
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
    """Builder over a Spark DataFrame.

    Restored from a broken original whose parameters shared one name (a
    SyntaxError) and which never assigned ``self._spark`` / ``self.df`` /
    ``self._working_dir``, all of which later methods read.
    """
    import pyspark

    self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
    self.df = df
    self._working_dir = working_dir
    super().__init__(
        cache_dir=cache_dir,
        # The DataFrame's semantic hash makes the cache key content-addressed.
        config_name=str(self.df.semanticHash()),
        **config_kwargs,
    )
def _validate_cache_dir(self):
    """Verify the cache dir is reachable from Spark workers on multi-node clusters.

    Renamed to match the existing ``self._validate_cache_dir()`` call in
    `_prepare_split`; the original read undefined locals (``probe_file``,
    ``probe``) and called the nonexistent ``uuid.uuida``.
    """
    # Returns the path of the created file.
    def create_cache_and_write_probe(context):
        # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
        # already exist.
        os.makedirs(self._cache_dir, exist_ok=True)
        probe_file = os.path.join(self._cache_dir, """fs_test""" + uuid.uuid4().hex)
        # Opening the file in append mode will create a new file unless it already exists, in which case it will not
        # change the file contents.
        open(probe_file, """a""")
        return [probe_file]

    # Local master: the driver is the only node, nothing to validate.
    if self._spark.conf.get("""spark.master""", """""").startswith("""local"""):
        return
    # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
    # accessible to the driver.
    # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
    if self._cache_dir:
        probe = (
            self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
        )
        if os.path.isfile(probe[0]):
            return
    raise ValueError(
        """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""")
def _info(self):
    """Dataset metadata; renamed to the DatasetBuilder hook name (the original's
    identical method names shadowed each other)."""
    return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
    """Single TRAIN split; renamed to the DatasetBuilder hook name (the original's
    identical method names shadowed each other)."""
    return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _repartition_df_if_needed(self, max_shard_size):
    """Repartition the DataFrame so each partition is roughly <= max_shard_size bytes.

    Renamed to match the existing ``self._repartition_df_if_needed(...)`` call in
    `_prepare_split`; the original read the undefined name ``max_shard_size`` and
    discarded the repartitioned DataFrame instead of assigning ``self.df``.
    """
    import pyspark

    def get_arrow_batch_size(it):
        for batch in it:
            yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]})

    df_num_rows = self.df.count()
    sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
    # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
    approx_bytes_per_row = (
        self.df.limit(sample_num_rows)
        .repartition(1)
        .mapInArrow(get_arrow_batch_size, """batch_bytes: long""")
        .agg(pyspark.sql.functions.sum("""batch_bytes""").alias("""sample_bytes"""))
        .collect()[0]
        .sample_bytes
        / sample_num_rows
    )
    approx_total_size = approx_bytes_per_row * df_num_rows
    if approx_total_size > max_shard_size:
        # Make sure there is at least one row per partition.
        new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
        self.df = self.df.repartition(new_num_partitions)
def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
    """Write the dataset to shard files on Spark workers; yield (task_id, stats).

    Renamed to match the existing ``self._prepare_split_single(...)`` call in
    `_prepare_split`; the original's three parameters shared one name (a
    SyntaxError) and most locals were undefined.
    """
    import pyspark
    import shutil  # not imported at file top in this module; needed for the move below

    writer_class = ParquetWriter if file_format == """parquet""" else ArrowWriter
    working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
    embed_local_files = file_format == """parquet"""
    # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
    # pickling the SparkContext.
    features = self.config.features
    writer_batch_size = self._writer_batch_size
    storage_options = self._fs.storage_options

    def write_arrow(it):
        # Within the same SparkContext, no two task attempts will share the same attempt ID.
        task_id = pyspark.TaskContext().taskAttemptId()
        first_batch = next(it, None)
        if first_batch is None:
            # Some partitions might not receive any data.
            return pa.RecordBatch.from_arrays(
                [[task_id], [0], [0]], names=["""task_id""", """num_examples""", """num_bytes"""], )
        shard_id = 0
        writer = writer_class(
            features=features,
            path=working_fpath.replace("""SSSSS""", f'''{shard_id:05d}''').replace("""TTTTT""", f'''{task_id:05d}'''),
            writer_batch_size=writer_batch_size,
            storage_options=storage_options,
            embed_local_files=embed_local_files,
        )
        table = pa.Table.from_batches([first_batch])
        writer.write_table(table)
        for batch in it:
            # Roll over to a new shard once the current one exceeds max_shard_size.
            if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["""task_id""", """num_examples""", """num_bytes"""], )
                shard_id += 1
                writer = writer_class(
                    features=writer._features,
                    path=working_fpath.replace("""SSSSS""", f'''{shard_id:05d}''').replace("""TTTTT""", f'''{task_id:05d}'''),
                    writer_batch_size=writer_batch_size,
                    storage_options=storage_options,
                    embed_local_files=embed_local_files,
                )
            table = pa.Table.from_batches([batch])
            writer.write_table(table)
        if writer._num_bytes > 0:
            num_examples, num_bytes = writer.finalize()
            writer.close()
            yield pa.RecordBatch.from_arrays(
                [[task_id], [num_examples], [num_bytes]], names=["""task_id""", """num_examples""", """num_bytes"""], )
        if working_fpath != fpath:
            # Move finished shards from the worker's scratch dir next to the final path.
            for file in os.listdir(os.path.dirname(working_fpath)):
                dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                # NOTE(review): `file` is a bare name relative to the worker's cwd —
                # confirm this matches the upstream implementation's semantics.
                shutil.move(file, dest)

    stats = (
        self.df.mapInArrow(write_arrow, """task_id: long, num_examples: long, num_bytes: long""")
        .groupBy("""task_id""")
        .agg(
            pyspark.sql.functions.sum("""num_examples""").alias("""total_num_examples"""),
            pyspark.sql.functions.sum("""num_bytes""").alias("""total_num_bytes"""),
            pyspark.sql.functions.count("""num_bytes""").alias("""num_shards"""),
            pyspark.sql.functions.collect_list("""num_examples""").alias("""shard_lengths"""),
        )
        .collect()
    )
    for row in stats:
        yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
    # Prepare one dataset split from the Spark DataFrame: shards are written by
    # `_prepare_split_single`, then renamed into the final
    # `<name>-<split>-TTTTT-SSSSS-of-NNNNN.<file_format>` layout.
    # NOTE(review): local bindings in this block look machine-mangled — every
    # assignment targets `lowercase_` while later lines read names such as
    # `max_shard_size`, `is_local`, `path_join`, `fpath`; verify against the
    # upstream datasets SparkDatasetBuilder._prepare_split before relying on them.
    self._validate_cache_dir()
    # Upper bound on bytes per shard (falls back to MAX_SHARD_SIZE).
    lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
    self._repartition_df_if_needed(lowercase_ )
    lowercase_ : Tuple = not is_remote_filesystem(self._fs )
    # os.path.join for local paths, posixpath.join for remote filesystems.
    lowercase_ : int = os.path.join if is_local else posixpath.join
    # TTTTT = task id, SSSSS = shard id within the task, NNNNN = total shards.
    lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
    lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
    lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
    lowercase_ : Any = 0
    lowercase_ : Tuple = 0
    lowercase_ : int = 0
    lowercase_ : Dict = []
    lowercase_ : Union[str, Any] = []
    for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
        (
            (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) ,
        ) : Union[str, Any] = content
        if num_bytes > 0:
            # Accumulate per-task stats; used below for renaming and reporting.
            total_num_examples += num_examples
            total_num_bytes += num_bytes
            total_shards += num_shards
            task_id_and_num_shards.append((task_id, num_shards) )
            all_shard_lengths.extend(lowercase_ )
    lowercase_ : List[str] = total_num_examples
    lowercase_ : int = total_num_bytes
    # should rename everything at the end
    logger.debug(f'''Renaming {total_shards} shards.''' )
    if total_shards > 1:
        lowercase_ : Tuple = all_shard_lengths
        # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
        # pickling error due to pickling the SparkContext.
        lowercase_ : Dict = self._fs
        # use the -SSSSS-of-NNNNN pattern
        def _rename_shard(
            lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
            # Runs on Spark executors: renames one shard file to its final name.
            rename(
                lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
        lowercase_ : Union[str, Any] = []
        lowercase_ : Tuple = 0
        for i in range(len(lowercase_ ) ):
            # Assign a contiguous global shard id across all tasks.
            lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
            for shard_id in range(lowercase_ ):
                args.append([task_id, shard_id, global_shard_id] )
                global_shard_id += 1
        # One Spark partition per shard so the renames run in parallel.
        self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
    else:
        # don't use any pattern
        lowercase_ : List[str] = 0
        lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
        self._rename(
            fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
    # Stream examples straight from the Spark DataFrame rather than reading the
    # written shards; the split-generator argument is accepted but unused here.
    return SparkExamplesIterable(self.df )
| 30
| 1
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    # CLI entry point: convert an original Stable Diffusion checkpoint into the
    # diffusers pipeline layout and save it to --dump_path.
    # Fixes vs. the previous version: `parser`, `args` and `pipe` were assigned
    # to `_lowercase` but read under their real names (NameError), and
    # `torch.floataa` is not a torch attribute — half precision is torch.float16.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        # Cast the whole pipeline to half precision before saving.
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the Bloom model package.
# Fixes vs. the previous version: the import dict was bound to `_lowercase` while
# `_LazyModule` was passed an undefined `_import_structure` (NameError at import),
# the tokenizer/model entries overwrote the dict with bare lists instead of adding
# keys, and the constructed `_LazyModule` was never installed into `sys.modules`.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """Fast unit tests for KandinskyInpaintPipeline built from tiny dummy models.

    NOTE(review): the class attributes below look machine-mangled — every
    assignment targets the same name `UpperCamelCase__`, so only the last one
    survives at runtime. Upstream these are distinct attributes
    (pipeline_class, params, batch_params, required_optional_params,
    test_xformers_attention); confirm against the original diffusers test.
    """

    UpperCamelCase__ = KandinskyInpaintPipeline
    UpperCamelCase__ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    UpperCamelCase__ = [
        '''prompt''',
        '''negative_prompt''',
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
        '''mask_image''',
    ]
    UpperCamelCase__ = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    UpperCamelCase__ = False

    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Dummy text-embedder hidden size (kept tiny for speed).
        return 32

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Dummy time-embedding input dimension.
        return 32

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Base channel count for the dummy UNet blocks.
        return self.time_input_dim

    @property
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Time-embedding projection dimension (4x the input dim).
        return self.time_input_dim * 4

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Dummy cross-attention dimension.
        return 100

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Tiny multilingual CLIP tokenizer from the hub test fixtures.
        lowercase_ : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
        return tokenizer

    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Deterministic tiny MultilingualCLIP text encoder in eval mode.
        torch.manual_seed(0 )
        lowercase_ : int = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        lowercase_ : List[str] = MultilingualCLIP(lowercase_ )
        lowercase_ : Any = text_encoder.eval()
        return text_encoder

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Deterministic tiny UNet; 9 in-channels = 4 latent + 4 masked-image + 1 mask.
        torch.manual_seed(0 )
        lowercase_ : int = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """text_image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """text_image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        lowercase_ : Dict = UNetaDConditionModel(**lowercase_ )
        return model

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Config kwargs for the tiny VQ decoder (movq).
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Deterministic tiny VQModel used as the movq image decoder.
        torch.manual_seed(0 )
        lowercase_ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Assemble all pipeline components (text encoder, tokenizer, unet,
        # scheduler, movq) for KandinskyInpaintPipeline(**components).
        lowercase_ : int = self.dummy_text_encoder
        lowercase_ : int = self.dummy_tokenizer
        lowercase_ : List[str] = self.dummy_unet
        lowercase_ : int = self.dummy_movq
        lowercase_ : Optional[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowercase_ , )
        lowercase_ : Any = {
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict=0 ):
        # Build deterministic call kwargs: seeded image embeddings, a 64x64 RGB
        # init image, and a mask with a single zeroed pixel.
        lowercase_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
        lowercase_ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase_ )
        # create init_image
        lowercase_ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
        lowercase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ : Dict = Image.fromarray(np.uinta(lowercase_ ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        lowercase_ : Tuple = np.ones((64, 64) , dtype=np.floataa )
        lowercase_ : Any = 0
        if str(lowercase_ ).startswith("""mps""" ):
            # mps does not support device-specific generators.
            lowercase_ : Optional[Any] = torch.manual_seed(lowercase_ )
        else:
            lowercase_ : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
        lowercase_ : Union[str, Any] = {
            """prompt""": """horse""",
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Run the pipeline on CPU and compare a corner slice of the output
        # against stored reference values (both dict and tuple return paths).
        lowercase_ : Optional[int] = """cpu"""
        lowercase_ : Tuple = self.get_dummy_components()
        lowercase_ : List[Any] = self.pipeline_class(**lowercase_ )
        lowercase_ : Dict = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : Union[str, Any] = pipe(**self.get_dummy_inputs(lowercase_ ) )
        lowercase_ : Dict = output.images
        lowercase_ : str = pipe(
            **self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
        lowercase_ : List[Any] = image[0, -3:, -3:, -1]
        lowercase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        print(f'''image.shape {image.shape}''' )
        assert image.shape == (1, 64, 64, 3)
        lowercase_ : Optional[Any] = np.array(
            [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Batched vs. single inference must agree within a loose tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
    """Slow GPU integration test: Kandinsky inpainting against a stored reference image."""

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # End-to-end: the prior pipeline produces image embeddings from the text
        # prompt, then the inpaint pipeline fills the masked region of the cat
        # image; result is checked via assert_mean_pixel_difference.
        lowercase_ : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
        lowercase_ : str = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        # Mask out a single pixel at the top-left of a 768x768 all-ones mask.
        lowercase_ : Dict = np.ones((768, 768) , dtype=np.floataa )
        lowercase_ : Union[str, Any] = 0
        lowercase_ : Optional[Any] = """a hat"""
        lowercase_ : List[str] = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(lowercase_ )
        lowercase_ : Optional[Any] = KandinskyInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
        lowercase_ : Tuple = pipeline.to(lowercase_ )
        pipeline.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        lowercase_ , lowercase_ : Any = pipe_prior(
            lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        lowercase_ : Optional[int] = pipeline(
            lowercase_ , image=lowercase_ , mask_image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        lowercase_ : Optional[int] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowercase_ , lowercase_ )
| 30
|
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Interactively encrypt or decrypt a message with a Vigenere cipher.

    Fixes vs. the previous version: all four functions shared the mangled name
    `lowerCamelCase` (each def shadowed the last) while call sites referenced
    `main`/`encrypt_message`/`decrypt_message`/`translate_message`, and the
    duplicated parameter names were a SyntaxError.
    """
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* with *key* using the Vigenere cipher."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt *message* with *key* using the Vigenere cipher."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of *message* by the corresponding *key* letter.

    Case is preserved; non-letters pass through unchanged and do not advance
    the key index.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            # Wrap around the alphabet.
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters are copied verbatim.
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
| 30
| 1
|
'''simple docstring'''
import enum
import shutil
import sys
# Terminal width is captured once at import time.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

# ANSI cursor-movement suffixes keyed by direction name.
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    """Vertical cursor movement directions.

    Fix vs. the previous version: both members used the same mangled name,
    which raises TypeError when the Enum class is created.
    """

    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write *content* (plus *end*) to stdout and flush immediately.

    Fix vs. the previous version: all helpers shared the mangled name
    `lowerCamelCase` and duplicated parameter names (SyntaxError), while the
    bodies referenced `forceWrite`/`reset_cursor`/`TERMINAL_WIDTH` by their
    real names.
    """
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write *content* wrapped in the ANSI color code *color*."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Move the cursor back to the start of the current line."""
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    """Move the cursor *num_lines* in *direction* ('up'/'down'/'left'/'right')."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current line and return the cursor to its start."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a horizontal rule across the terminal from the line start."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 30
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
    """Tests for BarkProcessor: save/load round-trips, voice presets, tokenizer parity.

    NOTE(review): the setUp assignments below look machine-mangled — they bind
    to `lowercase_` while later methods read `self.checkpoint`, `self.tmpdirname`,
    `self.input_string`, etc.; confirm against the upstream transformers test.
    """

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Fixtures: tiny hub checkpoint, temp dir, voice preset name, sample text.
        lowercase_ : List[Any] = """ylacombe/bark-small"""
        lowercase_ : List[str] = tempfile.mkdtemp()
        lowercase_ : Tuple = """en_speaker_1"""
        lowercase_ : Union[str, Any] = """This is a test string"""
        lowercase_ : int = """speaker_embeddings_path.json"""
        lowercase_ : Any = """speaker_embeddings"""

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
        # Helper: load the tokenizer for the fixture checkpoint.
        return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # save_pretrained -> from_pretrained must preserve the tokenizer vocab.
        lowercase_ : Any = self.get_tokenizer()
        lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Round-trip with speaker embeddings and extra tokenizer kwargs
        # (custom BOS/EOS) must still match the reloaded vocab.
        lowercase_ : Any = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Voice presets may arrive as an in-memory dict, an .npz file path, or a
        # hub preset name; all three must surface in inputs["history_prompt"].
        lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        lowercase_ : Optional[int] = 35
        lowercase_ : int = 2
        lowercase_ : Union[str, Any] = 8
        lowercase_ : Union[str, Any] = {
            """semantic_prompt""": np.ones(lowercase_ ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : Dict = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(lowercase_ , **lowercase_ )
        lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : List[Any] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Processor output must match calling the tokenizer directly with the
        # same padding/special-token settings.
        lowercase_ : List[str] = self.get_tokenizer()
        lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
        lowercase_ : Any = processor(text=self.input_string )
        lowercase_ : List[str] = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 1
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    """Return the number of trainable (requires_grad) parameters in *model*.

    Fix vs. the previous version: the function was mangled to `lowerCamelCase`
    while the callback below calls `count_trainable_parameters` (NameError),
    and the parameter names were unusable.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    # np.prod over each parameter's shape gives its element count.
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
_lowercase : List[str] = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the top-3 checkpoints by `val_<metric>`.

    Fix vs. the previous version: function and parameter names were mangled
    (`lowerCamelCase`, colliding with the other factories in this module) and
    the filename template was never passed through.

    Raises:
        NotImplementedError: for metrics other than rouge2/bleu/em.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on `val_<metric>`.

    Minimizes loss-like metrics, maximizes everything else. Fix vs. the
    previous version: name/parameters were mangled and `verbose` was bound to
    an undefined local; verbose=True matches the upstream callback.
    """
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class __magic_name__ ( pl.Callback):
    """PyTorch Lightning callback that logs learning rates, metrics and generations.

    NOTE(review): local bindings in this class look machine-mangled — values are
    assigned to `lowercase_` while later lines read `od`, `results_file`,
    `generations_file`, `npars`, etc.; confirm against the upstream
    Seq2SeqLoggingCallback in transformers' research examples.
    """

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Dict ):
        # Log the current learning rate of every optimizer param group.
        lowercase_ : List[Any] = {f'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lowercase_ )

    @rank_zero_only
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule , lowercase_ : str , lowercase_ : Tuple=True ):
        # Write scalar metrics (and optionally generated predictions) for the
        # given split to text files under the model's output_dir. Rank-zero only.
        logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        lowercase_ : Dict = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
        # Log results
        lowercase_ : Optional[Any] = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            lowercase_ : List[str] = od / """test_results.txt"""
            lowercase_ : Union[str, Any] = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            lowercase_ : Tuple = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
            lowercase_ : Union[str, Any] = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=lowercase_ )
        generations_file.parent.mkdir(exist_ok=lowercase_ )
        with open(lowercase_ , """a+""" ) as writer:
            for key in sorted(lowercase_ ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                lowercase_ : Any = metrics[key]
                if isinstance(lowercase_ , torch.Tensor ):
                    # Unwrap 0-dim tensors so they format as plain floats.
                    lowercase_ : Optional[Any] = val.item()
                lowercase_ : Optional[int] = f'''{key}: {val:.6f}\n'''
                writer.write(lowercase_ )
        if not save_generations:
            return
        if "preds" in metrics:
            lowercase_ : List[Any] = """\n""".join(metrics["""preds"""] )
            generations_file.open("""w+""" ).write(lowercase_ )

    @rank_zero_only
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Tuple ):
        # Log total and trainable parameter counts (in millions) at train start.
        try:
            lowercase_ : Optional[Any] = pl_module.model.model.num_parameters()
        except AttributeError:
            lowercase_ : Optional[Any] = pl_module.model.num_parameters()
        lowercase_ : Dict = count_trainable_parameters(lowercase_ )
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )

    @rank_zero_only
    def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule ):
        # Persist metrics and dump test results/generations at test end.
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(lowercase_ , lowercase_ , """test""" )

    @rank_zero_only
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : pl.Trainer , lowercase_ : Optional[int] ):
        # Persist metrics at validation end (generation dump intentionally off).
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 30
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
    """Image-classification task template: maps dataset columns to 'image'/'labels'.

    NOTE(review): the class attributes below look machine-mangled — all five are
    assigned to `UpperCamelCase__`, so only the last survives at runtime.
    Upstream these are task, input_schema, label_schema, image_column and
    label_column; confirm against datasets' ImageClassification template.
    """

    UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
    UpperCamelCase__ = Features({'''image''': Image()})
    UpperCamelCase__ = Features({'''labels''': ClassLabel})
    UpperCamelCase__ = "image"
    UpperCamelCase__ = "labels"

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
        # Return a copy of this template whose label schema uses the dataset's
        # actual ClassLabel feature; validates the label column first.
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , lowercase_ ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        lowercase_ : List[str] = copy.deepcopy(self )
        lowercase_ : List[str] = self.label_schema.copy()
        lowercase_ : List[Any] = features[self.label_column]
        lowercase_ : Optional[Any] = label_schema
        return task_template

    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Mapping from dataset column names to the template's canonical names.
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 30
| 1
|
'''simple docstring'''
def lowerCamelCase(string: str, separator: str = " ") -> list:
    """Split *string* on single-character *separator* without using str.split.

    Fix vs. the previous version: both parameters were named `UpperCAmelCase__`,
    which is a SyntaxError (duplicate argument in function definition).

    Note: unlike str.split, a trailing separator does not yield a final empty
    segment.

    >>> lowerCamelCase("a b c")
    ['a', 'b', 'c']
    >>> lowerCamelCase("a,b", ",")
    ['a', 'b']
    >>> lowerCamelCase("")
    []
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            # Close the current segment (may be empty for doubled separators).
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            # Final character: flush the remaining segment.
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 30
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download up to ``num_class_images`` LAION images matching ``class_prompt``.

    Images are saved under ``{class_data_dir}/images``; captions, URLs and
    local paths are recorded in sibling text files. Renamed to ``retrieve``
    to match the call in the ``__main__`` guard.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Nothing to do if enough images were already downloaded.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until the index returns enough candidates (bounded at 1e4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa_urls, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate the payload is a decodable image before keeping it.
                    Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa_urls.write(images["url"] + "\n")
                    fa_paths.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip broken URLs / undecodable payloads.
                continue
    return
def parse_args():
    """Parse the retrieval script's command-line arguments.

    Renamed to ``parse_args`` to match the call in the ``__main__`` guard;
    the previous body referenced an undefined name for every option value.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # CLI entry point: download regularization images for the requested prompt.
    # Fix: the parsed namespace was previously bound to `_lowercase` while the
    # next line read `args`, raising NameError.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Canonical ALBERT checkpoint names mapped to their hosted configuration files.
_lowercase : Union[str, Any] = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class __magic_name__(PretrainedConfig):
    """Configuration for an ALBERT-style model.

    Defaults reproduce the xxlarge architecture. NOTE(review): the previous
    revision declared all 20 constructor parameters with the same name (a
    SyntaxError) and bound every attribute to one dead local; the names below
    are reconstructed from the parameter order and defaults.
    """

    # Identifier used by the config registry.
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class __magic_name__(OnnxConfig):
    """ONNX export configuration describing the dynamic axes of model inputs.

    NOTE(review): this class reuses the name of the config class above and
    shadows it at module level — rename one of them before shipping.
    """

    @property
    def inputs(self):
        """Ordered mapping of model input names to their dynamic axes."""
        # Fix: `dynamic_axis` was previously assigned to a dead local and then
        # read as an undefined name. Multiple-choice inputs carry an extra
        # "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 30
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place with the (deliberately slow) slowsort algorithm.

    Fixes: the previous signature declared the same parameter name three times
    (a SyntaxError) while the body already recursed via ``slowsort``.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        # Place the larger of the two sub-maxima at the end, then re-sort the rest.
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 30
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowercase : Optional[int] = logging.get_logger(__name__)
@dataclass
class __magic_name__(BenchmarkArguments):
    """TensorFlow-specific benchmark arguments.

    Extends the shared ``BenchmarkArguments`` with TF options (TPU name,
    device index, eager mode, XLA) and translates deprecated ``no_*`` flags
    into their positive counterparts.

    NOTE(review): field/property names are reconstructed from the internal
    references (``self.deprecated_args``, ``self._setup_tpu`` …); the previous
    revision collapsed them into shared identifiers. ``self.tpu`` and
    ``self.cuda`` are presumably provided by the base class — confirm.
    """

    # Legacy negated flags still accepted in **kwargs; each maps to the
    # positive argument obtained by stripping the "no_" prefix.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` kwargs, pop TF-only options, then defer to the base class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                # Invert the negated flag into its positive counterpart.
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        """Resolve the TPU cluster once, or None when no TPU is reachable."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        """Build the tf.distribute strategy for the selected device (TPU > single GPU > CPU)."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        """True when a TPU cluster resolver was successfully created."""
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        """The lazily-built tf.distribute strategy."""
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        """Physical GPUs visible to TensorFlow."""
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        """Number of usable GPUs (0 when CUDA is disabled)."""
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 30
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# CLI options for the Intel-optimized CPU Stable Diffusion run.
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
# Fix: `torch.bfloataa` is not a real dtype attribute; the intended dtype is
# torch.bfloat16. Dummy UNet inputs let IPEX trace the model.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    # Fall back to optimization without a sample input if tracing fails.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 30
| 1
|
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    """Build an ``XCLIPConfig`` for ``model_name`` with ``num_frames`` video frames.

    Renamed to ``get_xclip_config`` to match the call site below.
    NOTE(review): the attribute targets of the "large" overrides are
    reconstructed (text 768/3072/12, vision 1024/4096/16/24, MIT 768/3072) —
    confirm against the upstream conversion script.
    """
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    """Translate an original X-CLIP checkpoint key into the HF Transformers layout.

    Fix: every replacement was previously assigned to a dead local while the
    chain read the undefined name ``name``; the replacements now thread
    through ``name``. Renamed to ``rename_key`` to match its call site.
    """
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    """Convert an original X-CLIP state dict to the Transformers layout, in place.

    Fused ``attn.in_proj`` weights/biases are split into separate q/k/v
    projections; every other key is translated via ``rename_key``, with the
    two projection matrices transposed to match ``nn.Linear`` conventions.

    NOTE(review): the destination key templates are reconstructed — the
    previous revision discarded every split slice into a dead local, so the
    converted dict lost all attention weights. Renamed to
    ``convert_state_dict`` to match its call site.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                # CLIP stores projections as (in, out); nn.Linear expects (out, in).
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video(num_frames):
    """Load the 'eating spaghetti' sample video with the requested frame count.

    Renamed to ``prepare_video`` to match its call site. Raises ValueError for
    unsupported frame counts (previously this fell through to an
    UnboundLocalError).
    """
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    else:
        raise ValueError(f"num_frames must be 8, 16 or 32, got {num_frames}")
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original X-CLIP checkpoint to the HF format and verify its outputs.

    Renamed to ``convert_xclip_checkpoint`` to match the ``__main__`` guard;
    local names are reconstructed from the references the previous (collapsed)
    revision still contained (``model_to_url``, ``checkpoint_url`` …).
    """
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # Only the (buffer) position ids are expected to be missing.
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    # Fix: the parser and namespace were previously bound to dead names while
    # the `add_argument` / call lines read `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure: submodule contents are only materialized on first access.
# NOTE(review): the structure dict and the model list are bound to `_lowercase`,
# yet `_LazyModule` below reads `_import_structure` — these names need to agree.
_lowercase : Optional[Any] = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
    # Modeling classes require torch; skip registering them when it is absent.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Any = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys
    # At runtime the module object is replaced by a lazy proxy.
    _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import argparse
import os
import re
_lowercase : List[Any] = "src/transformers"

# NOTE(review): the functions below reference these patterns by the names
# restored here; the previous revision bound every pattern to `_lowercase`,
# leaving `_re_indent` / `_re_strip_line` / `_re_bracket_content` undefined.
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the leading whitespace of ``line`` ("" for blank/whitespace-only lines).

    Fix: the match object was previously stored in a dead local while the
    return expression read the undefined name ``search``. Renamed to
    ``get_indent`` to match the call sites below.
    """
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split ``code`` into blocks delimited by lines at ``indent_level``.

    When ``start_prompt``/``end_prompt`` are given, everything before/after
    them becomes its own block. Fix: the previous signature reused one
    parameter name four times (a SyntaxError) and bound ``blocks`` /
    ``current_block`` to dead locals.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        # Everything up to the start prompt is kept as one leading block.
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                # The previous line was deeper-indented: this line closes the block.
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so comparisons ignore case and underscores.

    Fix: the outer parameter was previously named differently from the ``key``
    the inner closure called. Renamed to ``ignore_underscore`` to match its
    call site below.
    """
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort ``objects`` with constants first, then classes, then functions.

    Within each group the ordering is alphabetical, ignoring case and
    underscores. Fix: the previous signature declared the same parameter name
    twice (a SyntaxError) and the group lists were bound to dead locals.
    Renamed to ``sort_objects`` to match its call sites.
    """
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_object(import_statement):
    """Sort the imported names inside one (possibly multi-line) import statement.

    Fix: the previous revision discarded sorted results into dead locals and
    its sort lambda read a name different from its parameter.
    """
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace("\"", "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace("\"", "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f"\"{k}\"" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=True ) -> Any:
    """Sort the `_import_structure` entries of one `__init__.py` file.

    Intended to return True when the file would change and only checking was
    requested, otherwise to rewrite the file in place.

    NOTE(review): the signature declares the same parameter name twice (a
    SyntaxError as written: a file path and a check-only flag), and results
    are bound to `lowercase_` while later lines read names such as `code`,
    `main_blocks`, `block`, `block_lines`, `line_idx`, `internal_blocks`,
    `keys`, `sorted_indices`, `count`, `reorderded_blocks`, `check_only` and
    `file` — apparent mechanical renaming damage; confirm against the
    original implementation before relying on this block.
    """
    with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
        lowercase_ : str = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    lowercase_ : Union[str, Any] = split_code_in_indented_blocks(
        UpperCAmelCase__ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(UpperCAmelCase__ ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        lowercase_ : List[str] = main_blocks[block_idx]
        lowercase_ : Union[str, Any] = block.split("""\n""" )
        # Get to the start of the imports.
        lowercase_ : Union[str, Any] = 0
        while line_idx < len(UpperCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                lowercase_ : int = len(UpperCAmelCase__ )
            else:
                line_idx += 1
        if line_idx >= len(UpperCAmelCase__ ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        lowercase_ : Dict = """\n""".join(block_lines[line_idx:-1] )
        lowercase_ : Optional[int] = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        lowercase_ : List[str] = split_code_in_indented_blocks(UpperCAmelCase__ , indent_level=UpperCAmelCase__ )
        # We have two categories of import key: list or _import_structure[key].append/extend
        lowercase_ : str = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        lowercase_ : str = [(pattern.search(UpperCAmelCase__ ).groups()[0] if pattern.search(UpperCAmelCase__ ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        lowercase_ : List[Any] = [(i, key) for i, key in enumerate(UpperCAmelCase__ ) if key is not None]
        lowercase_ : List[str] = [x[0] for x in sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        lowercase_ : Any = 0
        lowercase_ : str = []
        for i in range(len(UpperCAmelCase__ ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                lowercase_ : Optional[int] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(UpperCAmelCase__ )
                count += 1
        # And we put our main block back together with its first and last line.
        lowercase_ : List[Any] = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
    if code != "\n".join(UpperCAmelCase__ ):
        if check_only:
            return True
        else:
            print(F'''Overwriting {file}.''' )
            with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
                f.write("""\n""".join(UpperCAmelCase__ ) )
def lowerCamelCase ( UpperCAmelCase__ : List[Any]=True ) -> Dict:
    """Run the `__init__.py` import sorter over a directory tree and raise if
    any file would be rewritten while only checking was requested.

    NOTE(review): the single parameter is used simultaneously as the walk
    root, the path-join root and the `check_only` flag, the failure list is
    bound to `lowercase_` while `result` and the list length are read from
    other names, and `sort_imports` is not defined under that name above —
    apparent mechanical renaming damage; the original root path cannot be
    recovered from this view, so confirm against the upstream implementation.
    """
    lowercase_ : List[Any] = []
    for root, _, files in os.walk(UpperCAmelCase__ ):
        if "__init__.py" in files:
            lowercase_ : Optional[Any] = sort_imports(os.path.join(UpperCAmelCase__ , """__init__.py""" ) , check_only=UpperCAmelCase__ )
            if result:
                lowercase_ : Dict = [os.path.join(UpperCAmelCase__ , """__init__.py""" )]
    if len(UpperCAmelCase__ ) > 0:
        raise ValueError(F'''Would overwrite {len(UpperCAmelCase__ )} files, run `make style`.''' )
if __name__ == "__main__":
    # CLI entry point: `--check_only` reports files that would change instead of rewriting them.
    # NOTE(review): results are bound to `_lowercase` while the code reads `parser`/`args`,
    # and `sort_imports_in_all_inits` is not defined under that name above — apparent
    # mechanical renaming damage.
    _lowercase : Any = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    _lowercase : Union[str, Any] = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( mat_a : np.ndarray , mat_b : np.ndarray , mat_c : np.ndarray , pseudo_inv : np.ndarray = None , ) -> np.ndarray:
    """Compute the Schur complement C - B^T A^-1 B of the block matrix
    [[A, B], [B^T, C]].

    Args:
        mat_a: square block A.
        mat_b: block B, with as many rows as A.
        mat_c: block C, with as many columns as B.
        pseudo_inv: optional precomputed (pseudo-)inverse of A; when None,
            ``np.linalg.inv(mat_a)`` is used.

    Returns:
        The Schur complement as an ndarray.

    Raises:
        ValueError: on incompatible shapes or when A is singular and no
            pseudo-inverse was supplied.
    """
    # Bug fix: the original declared four parameters with one duplicated name
    # (a SyntaxError) and read locals that were never bound.
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase):
    """Unit tests for the Schur-complement helper above.

    NOTE(review): all three test methods share the name
    `SCREAMING_SNAKE_CASE_`, so only the last definition is bound and run —
    apparent renaming damage; the bodies also read names (`a`, `b`, `c`,
    `det_a`, `det_s`, `schur_complement`) that the assignments to
    `lowercase_` never bind.
    """
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Intended check: det([[A, B], [B^T, C]]) == det(A) * det(Schur complement).
        lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
        lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
        lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
        lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
        lowercase_ : int = np.linalg.det(lowercase_ )
        lowercase_ : int = np.linalg.det(lowercase_ )
        self.assertAlmostEqual(lowercase_ , det_a * det_s )
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Intended check: mismatched row counts between A and B raise ValueError.
        lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(lowercase_ ):
            schur_complement(lowercase_ , lowercase_ , lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Intended check: mismatched column counts between B and C raise ValueError.
        lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(lowercase_ ):
            schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
    # Run the doctests first, then the unittest suite defined above.
    import doctest
    doctest.testmod()
    unittest.main()
| 30
| 1
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowerCamelCase ( repo_id : str , path : str , revision : Optional[str] = None ) -> str:
    """Build the URL of a file inside a dataset repository on the Hugging Face Hub.

    Args:
        repo_id: the dataset repository id.
        path: the path of the file inside the repository.
        revision: optional git revision; forwarded to ``hfh.hf_hub_url``.

    Returns:
        The resolved URL as a string.
    """
    # Bug fix: the original declared three parameters under one duplicated
    # name (a SyntaxError), making the body's reads unresolvable.
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 30
|
'''simple docstring'''
# Bug fix: the alphabet was bound to `_lowercase` while the encoder/decoder
# read `B64_CHARSET`, raising NameError on first use.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( data : bytes ) -> bytes:
    """Encode *data* as Base64 (RFC 4648) without using the stdlib codec.

    Args:
        data: a bytes-like object.

    Returns:
        The Base64-encoded bytes, '='-padded to a multiple of four characters.

    Raises:
        TypeError: if *data* is not bytes.
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later ('=' per dropped 2-bit pair).
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append arbitrary binary digits (0's) to make the length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character.
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def lowerCamelCase ( encoded_data : str ) -> bytes:
    """Decode Base64 (RFC 4648) *encoded_data* back to bytes.

    Args:
        encoded_data: an ASCII ``str`` or bytes-like object of Base64 text.

    Returns:
        The decoded bytes.

    Raises:
        TypeError: if *encoded_data* is neither ``str`` nor ``bytes``.
        ValueError: if a bytes input contains non-ASCII characters.
        AssertionError: on invalid Base64 characters or incorrect padding.
    """
    # Bug fix: the original tested `isinstance(x, x)` (always True for any
    # instance of its own type is nonsensical) and bound every intermediate
    # to a throwaway name while reading the intended ones.
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one, and drop the filler bits it stood for.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)
    decoded = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded)
if __name__ == "__main__":
    # Exercise the doctests of this module when run as a script.
    import doctest
    doctest.testmod()
| 30
| 1
|
'''simple docstring'''
def lowerCamelCase ( density : float , bulk_modulus : float ) -> float:
    """Compute the speed of sound in a fluid: v = sqrt(K / rho).

    Args:
        density: fluid density rho (must be positive).
        bulk_modulus: bulk modulus K (must be positive).

    Returns:
        The speed of sound in the same unit system as the inputs.

    Raises:
        ValueError: if either argument is non-positive.
    """
    # Bug fix: both parameters were declared under one duplicated name
    # (a SyntaxError) while the body read `density` and `bulk_modulus`.
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Exercise the doctests of this module when run as a script.
    import doctest
    doctest.testmod()
| 30
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( version ):
    """Update the docs' custom.js for a new release.

    Rewrites the `const stableVersion = ...` line and appends a
    `"vX.Y.Z": "vX.Y.Z"` entry to `const versionMapping = {...}` inside the
    file named by the module-level `_lowercase` constant.

    Args:
        version: the release version string (without the leading "v").
    """
    # Bug fix: the original opened the *version argument* as a file and wrote
    # the rewritten lines into throwaway names instead of back into `lines`.
    with open(_lowercase, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(_lowercase, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    # CLI entry point taking the release version to insert into custom.js.
    # NOTE(review): results are bound to `_lowercase` while the code reads
    # `parser`/`args`, and `update_custom_js` is not defined under that name
    # above — apparent mechanical renaming damage.
    _lowercase : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    _lowercase : Dict = parser.parse_args()
    update_custom_js(args.version)
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
    """Slow integration test checking the TF mT5-small seq2seq LM loss on a
    fixed input against a reference score.

    NOTE(review): every intermediate result is bound to `lowercase_`, yet the
    body reads `tokenizer`, `model`, `mtf_score` and `EXPECTED_SCORE` —
    apparent mechanical renaming damage.
    """
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
        lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
        lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
        lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
        lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
        lowercase_ : Optional[int] = -21.22_81_68
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 30
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
    """Test helper that builds BiT configs/inputs and runs shape checks for
    BitModel, BitForImageClassification and BitBackbone.

    NOTE(review): throughout this class, results are bound to `lowercase_`
    while later lines read the intended names (`parent`, `batch_size`,
    `model`, `result`, `config`, ...) — apparent mechanical renaming damage;
    the structure mirrors the usual transformers ModelTester pattern.
    """
    def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
        lowercase_ : Any = parent
        lowercase_ : str = batch_size
        lowercase_ : Any = image_size
        lowercase_ : Optional[Any] = num_channels
        lowercase_ : Any = embeddings_size
        lowercase_ : Union[str, Any] = hidden_sizes
        lowercase_ : Any = depths
        lowercase_ : Dict = is_training
        lowercase_ : Tuple = use_labels
        lowercase_ : str = hidden_act
        lowercase_ : Optional[Any] = num_labels
        lowercase_ : Tuple = scope
        lowercase_ : Any = len(lowercase_ )
        lowercase_ : Optional[Any] = out_features
        lowercase_ : Tuple = out_indices
        lowercase_ : str = num_groups
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Builds (config, pixel_values, labels) for the model tests.
        lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase_ : List[Any] = None
        if self.use_labels:
            lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
        lowercase_ : int = self.get_config()
        return config, pixel_values, labels
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # Materializes a BitConfig from the tester's attributes.
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
        # Checks the base model's last_hidden_state shape (spatial /32 downsampling).
        lowercase_ : Optional[int] = BitModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : List[Any] = model(lowercase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
        # Checks the classification head's logits shape.
        lowercase_ : Union[str, Any] = self.num_labels
        lowercase_ : Tuple = BitForImageClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
        # Checks backbone feature maps and channels, with and without out_features.
        lowercase_ : Any = BitBackbone(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Dict = model(lowercase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        lowercase_ : List[str] = None
        lowercase_ : Dict = BitBackbone(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Tuple = model(lowercase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Splits prepared inputs into (config, inputs_dict) for the common tests.
        lowercase_ : Optional[int] = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
        lowercase_ : Any = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    """BiT model test suite (common ModelTesterMixin/PipelineTesterMixin cases).

    NOTE(review): the class-level flags are all assigned to the same name
    `UpperCamelCase__` (only the last binding survives), many methods share
    the name `SCREAMING_SNAKE_CASE_` (only the last per decorator group is
    bound), and bodies read names (`config`, `model`, `inputs_dict`, ...)
    never bound by the `lowercase_` assignments — apparent mechanical
    renaming damage; the structure mirrors the upstream BiT tests.
    """
    UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    UpperCamelCase__ = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Per-test setup: model tester + config tester.
        lowercase_ : int = BitModelTester(self )
        lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Runs the standard config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        return
    @unittest.skip(reason="""Bit does not output attentions""" )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        pass
    @unittest.skip(reason="""Bit does not use inputs_embeds""" )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        pass
    @unittest.skip(reason="""Bit does not support input and output embeddings""" )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        pass
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Asserts that every model class takes `pixel_values` first.
        lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Optional[Any] = model_class(lowercase_ )
            lowercase_ : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
            lowercase_ : Optional[int] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        lowercase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Asserts norm layers start with weight==1 and bias==0.
        lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : List[Any] = model_class(config=lowercase_ )
            for name, module in model.named_modules():
                if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        # Checks hidden-state counts/shapes for both layer types.
        def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
            lowercase_ : Optional[Any] = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
            lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase_ : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase_ : Union[str, Any] = layer_type
                lowercase_ : Optional[Any] = True
                check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase_ : Union[str, Any] = True
                check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        pass
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Smoke-loads the first pretrained checkpoint from the archive list.
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ):
    """Load the COCO cats fixture image used by the vision integration tests.

    Returns:
        The PIL image opened from
        ``./tests/fixtures/tests_samples/COCO/000000039769.png``.
    """
    # Bug fix: the original bound the opened image to a throwaway name and
    # then returned the undefined name `image`, raising NameError.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Slow BiT integration test: classifies the fixture image and compares
    logits against reference values.

    NOTE(review): intermediate results are bound to `lowercase_` while the
    body reads `model`, `image_processor`, `outputs`, `expected_slice` —
    apparent mechanical renaming damage.
    """
    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Default image processor for the first pretrained checkpoint.
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
        lowercase_ : int = self.default_image_processor
        lowercase_ : List[Any] = prepare_img()
        lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            lowercase_ : str = model(**lowercase_ )
        # verify the logits
        lowercase_ : Optional[int] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowercase_ )
        lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """Backbone-specific test suite for BitBackbone (BackboneTesterMixin cases).

    NOTE(review): the three class attributes are all assigned to the same name
    `UpperCamelCase__`, so only the last binding survives — apparent
    mechanical renaming damage.
    """
    UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
    UpperCamelCase__ = BitConfig
    UpperCamelCase__ = False
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        lowercase_ : Union[str, Any] = BitModelTester(self )
| 30
| 1
|
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
# Checkpoint key suffixes whose tensors are averaged across tensor-parallel ranks
# during conversion.
# NOTE(review): the conversion function below reads `WEIGHTS_TO_AVERAGE_ENDSWITH`,
# but this list is bound to `_lowercase` — apparent mechanical renaming damage.
_lowercase : Tuple = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]
# Substrings marking weights stored row-parallel in Megatron-DeepSpeed (concatenated
# along dim 1 across ranks); the code below reads `WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN`.
_lowercase : str = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def lowerCamelCase ( key , file ) -> str:
    """Map a Megatron-DeepSpeed checkpoint key to its transformers BLOOM name.

    Args:
        key: the original state-dict key.
        file: the checkpoint file name; for transformer blocks its
            ``layer_<n>`` part determines the block index.

    Returns:
        The renamed key (direct map hit, or ``h.<n>.<key>`` for blocks).
    """
    # Bug fix: both parameters were declared under one duplicated name
    # (a SyntaxError) while the body read `key`/`file`, and the computed
    # layer number was bound to a throwaway name.
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: the first block follows 3 non-block layers.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def lowerCamelCase ( dtype ):
    """Return the storage size in bytes of one element of *dtype*.

    Args:
        dtype: a torch dtype (``torch.bool`` is special-cased to 1/8).

    Returns:
        Bytes per element (int), or 1/8 for booleans.

    Raises:
        ValueError: if no trailing bit-width can be parsed from the dtype name.
    """
    # Bug fix: intermediates were bound to throwaway names while the body
    # read `bit_search`/`bit_size`.
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
    """Convert a Megatron-DeepSpeed BLOOM checkpoint to transformers format.

    Intended flow: merge the tensor-parallel shards of each layer file
    (averaging norm-like weights, concatenating parallel linears), then
    either write a sharded output with an index json (`shard_model` branch)
    or load everything into a single BloomModel and save it.

    NOTE(review): the five parameters are all declared under one duplicated
    name (a SyntaxError as written: checkpoint path, config file, dump
    folder, shard flag, pretraining TP), and results are bound to
    `lowercase_` while later lines read `config`, `file_names`, `tensors`,
    `temp`, `keys`, `total_size`, `index_dict`, `model`, `other_keys`,
    `missing_keys`, etc. — apparent mechanical renaming damage; confirm
    against the upstream conversion script before relying on this block.
    """
    # Construct model
    if bloom_config_file == "":
        lowercase_ : Union[str, Any] = BloomConfig()
    else:
        lowercase_ : int = BloomConfig.from_json_file(UpperCAmelCase__ )
    if shard_model:
        lowercase_ : Optional[Any] = os.listdir(UpperCAmelCase__ )
        lowercase_ : Union[str, Any] = sorted(filter(lambda UpperCAmelCase__ : s.startswith("""layer""" ) and "model_00" in s , UpperCAmelCase__ ) )
        lowercase_ : Union[str, Any] = {"""weight_map""": {}, """metadata""": {}}
        lowercase_ : Optional[int] = 0
        lowercase_ : int = None
        lowercase_ : str = BloomConfig()
        for j, file in enumerate(UpperCAmelCase__ ):
            print("""Processing file: {}""".format(UpperCAmelCase__ ) )
            lowercase_ : Any = None
            for i in range(UpperCAmelCase__ ):
                # load all TP files
                lowercase_ : Any = file.replace("""model_00""" , F'''model_0{i}''' )
                lowercase_ : Any = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location="""cpu""" )
                # Rename keys in the transformers names
                lowercase_ : int = list(temp.keys() )
                for key in keys:
                    lowercase_ : Any = temp.pop(UpperCAmelCase__ )
                if tensors is None:
                    lowercase_ : str = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            lowercase_ : Union[str, Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            lowercase_ : Any = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    lowercase_ : Dict = tensors[key] / pretraining_tp
            torch.save(
                UpperCAmelCase__ , os.path.join(
                    UpperCAmelCase__ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) ) , ) , )
            for key in tensors.keys():
                lowercase_ : Optional[int] = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    lowercase_ : Optional[int] = """pytorch_model_{}-of-{}.bin""".format(
                        str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) )
        lowercase_ : Dict = BloomConfig()
        lowercase_ : int = pytorch_dump_folder_path + """/""" + CONFIG_NAME
        lowercase_ : List[Any] = total_size
        with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(UpperCAmelCase__ , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
            lowercase_ : List[Any] = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + """\n"""
            f.write(UpperCAmelCase__ )
    else:
        lowercase_ : Optional[int] = BloomModel(UpperCAmelCase__ )
        lowercase_ : int = os.listdir(UpperCAmelCase__ )
        lowercase_ : Optional[int] = sorted(filter(lambda UpperCAmelCase__ : s.startswith("""layer""" ) and "model_00" in s , UpperCAmelCase__ ) )
        lowercase_ : Union[str, Any] = None
        for i, file in enumerate(UpperCAmelCase__ ):
            lowercase_ : Tuple = None
            for i in range(UpperCAmelCase__ ):
                # load all TP files
                lowercase_ : List[str] = file.replace("""model_00""" , F'''model_0{i}''' )
                lowercase_ : List[Any] = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location="""cpu""" )
                # Rename keys in the transformers names
                lowercase_ : int = list(temp.keys() )
                for key in keys:
                    lowercase_ : int = temp.pop(UpperCAmelCase__ )
                if tensors is None:
                    lowercase_ : Dict = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            lowercase_ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            lowercase_ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    lowercase_ : int = tensors[key] / pretraining_tp
            lowercase_ : Optional[int] = model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
            assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
            if missing_keys is None:
                lowercase_ : Union[str, Any] = set(other_keys.missing_keys )
            else:
                lowercase_ : str = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, F'''The keys {missing_keys} are missing'''
        # Save pytorch-model
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        lowercase_ : Union[str, Any] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
        lowercase_ : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
        print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
        if config.torch_dtype is not None:
            lowercase_ : Union[str, Any] = model.to(config.torch_dtype )
        torch.save(model.state_dict() , UpperCAmelCase__ )
        print(F'''Save configuration file to {pytorch_config_dump_path}''' )
        with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI entry point for the BLOOM conversion script.
    # NOTE(review): the parser and parsed args are bound to `_lowercase` while the
    # code reads `parser`/`args`, and `convert_bloom_checkpoint_to_pytorch` is not
    # defined under that name above — apparent mechanical renaming damage.
    _lowercase : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    _lowercase : List[str] = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 30
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Fixture data for the ROUGE tests below: model hypotheses (PRED) and
# reference summaries (TGT), three CNN/DailyMail-style examples each.
# Fix: both lists were bound to the same name `_lowercase`, so the second
# shadowed the first; restored the distinct names the test bodies expect.
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]
TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
    """Disaggregated ROUGE scores should be deterministic across rouge-key subsets."""
    # NOTE(review): this block is machine-mangled — results are assigned to
    # `lowercase_` but read back as `no_aggregation` / `no_aggregation_just_ra`,
    # `UpperCAmelCase__` is undefined (originally the module-level PRED/TGT
    # constants and True/False flags), and `List` is not imported. As written
    # this raises NameError; the original names must be restored to fix it.
    lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
    # The rouge2 f-measure mean must not depend on which other keys were requested.
    assert (
        pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
    )
def lowerCamelCase ( ) -> Optional[Any]:
    """Newline separation should improve rougeLsum on multi-sentence summaries."""
    # NOTE(review): machine-mangled — assignments use `lowercase_` but the
    # assert reads `score` / `score_no_sep`, and `UpperCAmelCase__` is undefined
    # (originally PRED/TGT and the True/False `newline_sep` flags). NameError
    # as written.
    lowercase_ : Tuple = """rougeLsum"""
    lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
    """`newline_sep` must not change rouge1/rouge2/rougeL (only rougeLsum is sensitive)."""
    # NOTE(review): machine-mangled — results assigned to `lowercase_` but
    # compared as `score_sep` / `score_no_sep`; `UpperCAmelCase__` is undefined
    # (originally PRED/TGT, the newline flags, and the key list). NameError as
    # written.
    lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
    lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    assert score_sep == score_no_sep
def lowerCamelCase():
    """Single-sentence inputs: splitting on newlines must not change any score.

    Fix: restored from a mangled block — the two fixture lists had lost their
    names and the `newline_sep` flags were undefined placeholders, so the
    function raised NameError. (The unimported `Optional[Any]` return
    annotation was dropped for the same reason.)
    """
    hypotheses = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    references = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    # Each entry is one sentence, so newline splitting is a no-op and both
    # settings must produce identical scores.
    assert calculate_rouge(hypotheses, references, newline_sep=True) == calculate_rouge(
        hypotheses, references, newline_sep=False
    )
def lowerCamelCase():
    """Pegasus-style `<n>` summaries: rougeLsum should improve with newline splitting.

    Fix: restored from a mangled block — the fixture lists and the
    `prev_score`/`new_score` bindings had lost their names and the
    `newline_sep=False` flag was an undefined placeholder (NameError as
    written).
    """
    hypotheses = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    references = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    # Without newline splitting the whole summary is one "sentence".
    prev_score = calculate_rouge(hypotheses, references, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    # Default behaviour splits on newlines, which should raise rougeLsum here.
    new_score = calculate_rouge(hypotheses, references, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def lowerCamelCase():
    """Smoke-test `calculate_rouge_path` on the bundled WMT en-ro test files.

    Fix: restored from a mangled block — `data_dir` and the two result
    bindings had lost their names and the `isinstance` targets were undefined
    placeholders (NameError as written).
    """
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    # Aggregated (default) result is a plain dict of rouge-key -> score.
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    # Disaggregated result collects per-example scores in a defaultdict.
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase : List[str] = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__(PretrainedConfig):
    """Configuration for Speech2Text models (mirrors upstream `Speech2TextConfig`).

    NOTE(review): reconstructed from a machine-mangled block — every `__init__`
    parameter had been renamed to the same `lowercase_` (a SyntaxError), the
    instance attributes were assigned to locals instead of `self`, the three
    class attributes all shared one name, and the base class `_UpperCAmelCase`
    was undefined. Parameter names/defaults were recovered from the mangled
    assignment order; confirm against the upstream config.
    """

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        # copy so callers can pass any sequence (e.g. a tuple) without aliasing
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 30
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build the (old_name, new_name) pairs for mapping original ViLT weights
    onto the Hugging Face ViLT layout.

    Fix: restored from a mangled block — all four parameters shared the name
    `UpperCAmelCase__` (a SyntaxError) and the accumulator list had lost its
    `rename_keys` name. The function name matches the call site
    (`create_rename_keys(...)`) later in the file.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value entries.

    Fix: restored from a mangled block — both parameters shared one name
    (SyntaxError) and the q/k/v slices were assigned to throwaway locals
    instead of the new state-dict keys, so the split weights were discarded.
    Mutates `state_dict` in place; the name matches the call site
    (`read_in_q_k_v(...)`) later in the file.
    """
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the original timm-style classification head from `state_dict` in place.

    Fix: restored from a mangled block — `ignore_keys` had lost its name and
    `pop` was called with undefined placeholders; `None` default keeps the
    removal tolerant of already-missing keys.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`, in place.

    Fix: restored from a mangled block — all three parameters shared the name
    `UpperCAmelCase__` (a SyntaxError). The name matches the call site
    (`rename_key(...)`) later in the file.
    """
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT checkpoint at `checkpoint_url` into the
    Hugging Face ViLT structure and save model + processor to
    `pytorch_dump_folder_path`.

    NOTE(review): reconstructed from a machine-mangled block (duplicate
    parameter names, all locals renamed to `lowercase_`). Names were recovered
    from the mangled assignment order and the visible call sites; the expected
    logit slices are kept verbatim from the source.
    """
    # base config; the checkpoint variant is inferred from the URL below
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        # the MLM score decoder bias is tied/re-created, so it is expected missing
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        # (a duplicated 3-D slice assert from the mangled source was dropped:
        # VQA logits are 2-D, so `logits[0, 0, :3]` would raise IndexError)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert an original ViLT checkpoint (fetched by URL)
    # into a Hugging Face model + processor directory.
    # Fix: the parser and parsed namespace were assigned to `_lowercase` but
    # referenced as `parser` / `args` (NameError as written).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 30
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """Builds tiny TimeSformer configs and inputs for the model tests below.

    NOTE(review): reconstructed from a machine-mangled block — the `__init__`
    parameters all shared the name `lowercase_` (a SyntaxError), attributes
    were assigned to locals instead of `self`, and every method had been
    renamed to `SCREAMING_SNAKE_CASE_` so all but the last were shadowed. The
    class name matches its use (`TimesformerModelTester(self)`) in the test
    class later in the file.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TimeSformer.

    NOTE(review): reconstructed from a machine-mangled block — every method
    had been renamed to the same `SCREAMING_SNAKE_CASE_` identifier (so only
    the last survived), the class attributes all shared one name, and the
    mixin bases were the undefined `_UpperCAmelCase`. Method/attribute names
    follow the conventions the mixins require (`setUp`, `all_model_classes`,
    `test_*`).
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def lowerCamelCase ( ) -> list:
    """Download the reference spaghetti-eating clip from the Hub and return it as a list of frames."""
    file_path = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" )
    video = np.load(file_path)
    return list(video)
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Integration test for the pretrained Timesformer video classifier."""

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        """Run the Kinetics-400 checkpoint on a reference clip and verify shape and logits."""
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device )  # NOTE(review): assumes `torch_device` is imported at file top — confirm
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 30
| 1
|
'''simple docstring'''
from typing import Any
def lowerCamelCase ( UpperCAmelCase__ : list ) -> list[Any]:
    """Return the mode(s) of the input list, sorted ascending; an empty list yields [].

    Every value whose frequency equals the maximum frequency is included, so
    multimodal inputs return more than one value.
    """
    if not UpperCAmelCase__:
        return []
    # Frequency of each element, aligned positionally with the input.
    counts = [UpperCAmelCase__.count(value) for value in UpperCAmelCase__]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes: deduplicate via a set, then sort for a stable result.
    return sorted({UpperCAmelCase__[i] for i, count in enumerate(counts) if count == max_count})
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Convolution + batch norm + optional activation, the basic building block of RegNet."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # "same" padding for odd kernel sizes; bias is redundant before batch norm.
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # `activation=None` disables the non-linearity (used on the last conv of a block).
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet stem: a single stride-2 convolution applied to the raw pixel values."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        # Fail fast on inputs whose channel count disagrees with the configuration.
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """1x1 convolution + batch norm that projects a residual input to the right shape/stride."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state: Tensor) -> Tensor:
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation channel attention layer (https://arxiv.org/abs/1709.01507)."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Bottleneck MLP (as 1x1 convs) producing per-channel gates in (0, 1).
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        # Gate the input feature map channel-wise.
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """
    RegNet's X layer: a classic ResNet bottleneck with reduction 1 and a grouped 3x3 convolution.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # A projection shortcut is only needed when the shape or stride changes.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # No activation on the last conv; it is applied after the residual addition.
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation block inserted."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # A projection shortcut is only needed when the shape or stride changes.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # SE attention with a 4x channel reduction.
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage: `depth` stacked X or Y layers; only the first layer may downsample."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Stack of RegNet stages producing the final feature map and optional per-stage states."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            ) )
        # Remaining stages chain consecutive hidden sizes.
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        # Collect the input of every stage plus the final output when requested.
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading
    pretrained models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Kaiming init for convolutions; unit-gain affine parameters for normalization layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): assumes the checkpointing flag lives on the base model wrapper — confirm
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    """RegNet backbone: embedder -> encoder -> global average pooling."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Run the backbone; returns last hidden state, pooled output, and optional hidden states."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    """RegNet backbone plus a linear classification head over the pooled features."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Classify an image; when `labels` is given, also compute the task-appropriate loss."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from the label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 30
| 1
|
'''simple docstring'''
import os
import sys
import unittest
_lowercase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowercase : Any = os.path.join(git_repo_path, "src", "transformers")
_lowercase : Tuple = "\n{0} = None\n"
_lowercase : Optional[Any] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
_lowercase : Any = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class __magic_name__ ( unittest.TestCase):
    """Tests for the dummy-object generation helpers in utils/check_dummies.py."""

    def test_find_backend(self):
        # Plain import-structure lines carry no backend requirement.
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend(" if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )

        # NOTE(review): inner indentation reconstructed to the canonical template — confirm against check_dummies
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowercase : List[Any] = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __magic_name__ ( unittest.TestCase):
    """Pipeline tests for `text-classification` (sequence classification models)."""

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # Drop model types whose inputs differ from those of the usual text models.
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 30
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
| 30
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """
    Configuration for a ViViT (video vision transformer) model. The defaults yield a configuration
    similar to the google/vivit-b-16x2-kinetics400 checkpoint.
    """

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],  # NOTE: mutable default kept for interface compatibility; do not mutate in place
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        # Transformer hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Video/patch geometry.
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 30
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
    """Slow integration test for the TF mT5-small checkpoint."""

    @slow
    def test_small_integration_test(self):
        """Compare the model's mean LM loss on a tiny example against a known score."""
        # NOTE(review): `TFAutoModelForSeqaSeqLM` matches this file's import; the real
        # transformers symbol is `TFAutoModelForSeq2SeqLM` — verify the import line upstream.
        model = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""")
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""")

        input_ids = tokenizer("""Hello there""", return_tensors="""tf""").input_ids
        labels = tokenizer("""Hi I am""", return_tensors="""tf""").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        # Tolerance accounts for minor numerical differences across TF versions/hardware.
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 30
| 1
|
'''simple docstring'''
# Alphabet used by the Vigenère translation routines below.
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Interactively encrypt or decrypt a message with an alphabetic key."""
    message = input("""Enter message: """)
    key = input("""Enter key [alphanumeric]: """)
    mode = input("""Encrypt/Decrypt [e/d]: """)

    if mode.lower().startswith("""e"""):
        mode = """encrypt"""
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("""d"""):
        mode = """decrypt"""
        translated = decrypt_message(key, message)

    print(F'''\n{mode.title()}ed message:''')
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* with *key* using the Vigenère translation routine."""
    return translate_message(key, message, """encrypt""")
def decrypt_message(key: str, message: str) -> str:
    """Decrypt *message* with *key* using the Vigenère translation routine."""
    return translate_message(key, message, """decrypt""")
def translate_message(key: str, message: str, mode: str) -> str:
    """Encrypt or decrypt *message* against *key* (Vigenère cipher).

    Non-alphabetic symbols are passed through unchanged and do not advance the
    key position; letter case of the input is preserved.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            # Wrap around the alphabet.
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Advance the key cyclically, only for alphabetic symbols.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
| 30
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( ode_func: Callable , ya: float , xa: float , step_size: float , x_end: float ) -> np.array:
    """Integrate ``dy/dx = ode_func(x, y)`` from ``xa`` to ``x_end`` with Heun's method.

    Heun's method is a two-stage (predictor/corrector) Runge-Kutta scheme:
    an explicit Euler prediction followed by a trapezoidal correction.

    :param ode_func: right-hand side f(x, y) of the ODE
    :param ya:       initial value y(xa)
    :param xa:       initial abscissa
    :param step_size: integration step h
    :param x_end:    final abscissa
    :return: array of n+1 solution values, y[0] == ya
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        # Predictor: explicit Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 30
| 1
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    """Return the total number of trainable (``requires_grad``) parameters in *model*."""
    # Name restored: the callback class below calls `count_trainable_parameters`.
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
# Named `logger` because the callback class below logs through this name.
logger = logging.getLogger(__name__)


def lowerCamelCase ( output_dir , metric ):
    """Build a ``ModelCheckpoint`` callback that tracks ``val_{metric}``.

    :param output_dir: directory the checkpoints are written to
    :param metric: one of "rouge2", "bleu", "em" or "loss"
    :raises NotImplementedError: for any other metric name
    """
    if metric == "rouge2":
        exp = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        exp = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        exp = """{val_avg_em:.4f}-{step_count}"""
    elif metric == "loss":
        exp = """{val_avg_loss:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            """ function."""
        )

    # NOTE(review): mode="max" is applied even for metric == "loss" — confirm against callers.
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=F'''val_{metric}''',
        mode="""max""",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def lowerCamelCase ( metric , patience ):
    """Build an ``EarlyStopping`` callback on ``val_{metric}``.

    Loss-like metrics are minimized; everything else is maximized.
    """
    return EarlyStopping(
        monitor=F'''val_{metric}''',
        mode="""min""" if """loss""" in metric else """max""",
        patience=patience,
        verbose=True,
    )
class __magic_name__ ( pl.Callback):
    """Lightning callback: logs learning rates and parameter counts, and writes
    metric/generation text files at test/validation end.

    Method names restored to the ``pl.Callback`` hook names (the obfuscated file
    gave every method the same name, so only the last definition survived and
    ``self._write_logs`` could never resolve).
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lrs = {f'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        """Dump callback metrics (and optionally generations) for *type_path* to text files."""
        logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''')
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / """test_results.txt"""
            generations_file = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
            generations_file = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, """a+""") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                # Tensors must be converted to plain Python numbers before formatting.
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f'''{key}: {val:.6f}\n'''
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = """\n""".join(metrics["""preds"""])
            generations_file.open("""w+""").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, """test""")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 30
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order low-pass biquad (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos  # b2 equals b0 for the low-pass prototype

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def lowerCamelCase ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order high-pass biquad (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos  # b2 equals b0 for the high-pass prototype

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def lowerCamelCase ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order band-pass biquad with constant skirt gain."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowerCamelCase ( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order all-pass biquad (unity magnitude, frequency-dependent phase)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b2 = 1 - alpha
    b1 = -2 * _cos
    b0 = 1 + alpha

    filt = IIRFilter(2)
    # For an all-pass the numerator is the mirror image of the denominator.
    filt.set_coefficients([b0, b1, b2], [b2, b1, b0])
    return filt
def lowerCamelCase ( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad boosting/cutting ``gain_db`` at ``frequency``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowerCamelCase ( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad applying ``gain_db`` below ``frequency``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Intermediate terms shared by the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowerCamelCase ( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad applying ``gain_db`` above ``frequency``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Intermediate terms shared by the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 30
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Tests for ``MgpstrProcessor`` (character tokenizer + ViT image processor).

    Method names restored: the obfuscated file gave every method the same name,
    so later definitions shadowed earlier ones, ``setUp`` never ran, and unittest
    discovered no tests.
    """

    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        # NOTE(review): `self.image_processor_tester` is never assigned in this file —
        # verify against the upstream test before relying on this property.
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        """Write a char vocab and an image-processor config into a fresh temp dir."""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")

        image_processor_map = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, """w""", encoding="""utf-8""") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a single random PIL image (moved to HWC, uint8)."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="""np""")
        input_processor = processor(images=image_input, return_tensors="""np""")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = """test"""
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = """test"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["""pixel_values""", """labels"""])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        # char_decode strips the spaces that batch_decode inserts between characters.
        decode_strs = [seq.replace(""" """, """""") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        # Logit-shaped tensors for the char (38), BPE (50257) and wordpiece (30522) heads.
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 30
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
# Named `logger` because the builder class below logs through this name.
logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark dataset builder."""

    # Optional explicit feature schema; read as `self.config.features` by the builder.
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    """Return a zero-arg generator function yielding (key, row-dict) pairs.

    Rows are produced partition by partition, in *partition_order*; keys are
    "{partition_id}_{row_id}" so they are unique and reproducible.
    """
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("""*""", pyspark.sql.functions.spark_partition_id().alias("""part_id"""))
        for partition_id in partition_order:
            # Filter one partition at a time so only that partition is collected to the driver.
            partition_df = df_with_partition_id.select("""*""").where(F'''part_id = {partition_id}''').drop("""part_id""")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield F'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable over a Spark DataFrame, one shard per Spark partition.

    Class and method names restored: the builder below instantiates
    ``SparkExamplesIterable`` and relies on the ``_BaseExamplesIterable``
    shuffle/shard hook names.
    """

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        # Shuffling the partition order shuffles shards without touching row order within a partition.
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
    # Prepare (materialize) one dataset split using Spark, then rename the written
    # shard files into the final "-TTTTT-SSSSS-of-NNNNN" naming scheme.
    # NOTE(review): identifier obfuscation has collapsed the parameter names into
    # duplicates ("lowercase_"), which is not valid Python; the body still refers
    # to the intended names (split_generator, file_format, max_shard_size, ...).
    self._validate_cache_dir()
    # Upper bound on shard size, defaulting to the library-wide MAX_SHARD_SIZE.
    lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
    self._repartition_df_if_needed(lowercase_ )
    # Local filesystems use os.path.join; remote ones use posixpath.join.
    lowercase_ : Tuple = not is_remote_filesystem(self._fs )
    lowercase_ : int = os.path.join if is_local else posixpath.join
    # Placeholder suffix: TTTTT = task id, SSSSS = shard id, NNNNN = total shards.
    lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
    lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
    lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
    # Accumulators over all Spark tasks.
    lowercase_ : Any = 0
    lowercase_ : Tuple = 0
    lowercase_ : int = 0
    lowercase_ : Dict = []
    lowercase_ : Union[str, Any] = []
    for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
        # Each task yields (num_examples, num_bytes, num_shards, shard_lengths).
        # NOTE(review): the tuple-unpack targets below were obfuscated into
        # duplicate names; the branch body uses the intended names.
        (
            (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) ,
        ) : Union[str, Any] = content
        if num_bytes > 0:
            total_num_examples += num_examples
            total_num_bytes += num_bytes
            total_shards += num_shards
            task_id_and_num_shards.append((task_id, num_shards) )
            all_shard_lengths.extend(lowercase_ )
    lowercase_ : List[str] = total_num_examples
    lowercase_ : int = total_num_bytes
    # should rename everything at the end
    logger.debug(f'''Renaming {total_shards} shards.''' )
    if total_shards > 1:
        lowercase_ : Tuple = all_shard_lengths
        # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
        # pickling error due to pickling the SparkContext.
        lowercase_ : Dict = self._fs
        # use the -SSSSS-of-NNNNN pattern
        def _rename_shard(
            lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
            # Rename one shard file to its final global name on the (captured) fs.
            rename(
                lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
        # Build one (task_id, shard_id, global_shard_id) triple per shard, then
        # fan the renames out over Spark so remote filesystems are handled by workers.
        lowercase_ : Union[str, Any] = []
        lowercase_ : Tuple = 0
        for i in range(len(lowercase_ ) ):
            lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
            for shard_id in range(lowercase_ ):
                args.append([task_id, shard_id, global_shard_id] )
                global_shard_id += 1
        self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
    else:
        # don't use any pattern
        # Single shard: strip the placeholder suffix from the final file name.
        lowercase_ : List[str] = 0
        lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
        self._rename(
            fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
    # Return an examples iterable backed directly by the underlying Spark DataFrame.
    return SparkExamplesIterable(self.df )
| 30
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
_lowercase : str = logging.get_logger("transformers.models.encodec")
# Mapping tables: original EnCodec checkpoint key prefixes -> HF EncodecModel key prefixes.
# "*" is a wildcard for a layer index, expanded during weight loading.
# Quantizer (RVQ codebook) parameters.
_lowercase : Union[str, Any] = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
# Encoder convolution / LSTM weights (shared by 24kHz and 48kHz variants).
_lowercase : Dict = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
# Encoder normalization layers — only present in the 48kHz checkpoint.
_lowercase : Dict = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
# Decoder convolution / transposed-convolution / LSTM weights.
_lowercase : List[str] = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
# Decoder normalization layers — only present in the 48kHz checkpoint.
_lowercase : List[Any] = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
# Combined mapping for the 24kHz/32kHz models (no norm layers).
# NOTE(review): the MAPPING_* names spliced below were renamed to "_lowercase"
# by the obfuscation above, so these references are currently unresolved.
_lowercase : Tuple = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
# Combined mapping for the 48kHz model (includes norm layers).
_lowercase : str = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
# Top-level keys to keep / checkpoint keys to ignore during conversion (both empty).
_lowercase : Dict = []
_lowercase : Any = []
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] ) -> Optional[int]:
    # Walk `key` dot-by-dot into the HF model, check the target shape matches the
    # checkpoint tensor, and copy the value into the attribute named by weight_type.
    # NOTE(review): parameters were obfuscated into duplicates
    # (hf_model, key, value, full_name, weight_type in the upstream script),
    # and the branch assignments below were collapsed to throwaway names —
    # originally each branch wrote e.g. `hf_pointer.weight_g.data = value`.
    for attribute in key.split(""".""" ):
        # Descend one attribute level per dotted component.
        lowercase_ : List[str] = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
    if weight_type is not None:
        lowercase_ : Optional[Any] = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape
    else:
        lowercase_ : Any = hf_pointer.shape
    if hf_shape != value.shape:
        # Fail loudly on shape mismatches rather than silently truncating.
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    # Dispatch on the kind of parameter being loaded (conv/norm/LSTM tensors).
    if weight_type == "weight":
        lowercase_ : List[str] = value
    elif weight_type == "weight_g":
        lowercase_ : Union[str, Any] = value
    elif weight_type == "weight_v":
        lowercase_ : str = value
    elif weight_type == "bias":
        lowercase_ : Dict = value
    elif weight_type == "running_mean":
        lowercase_ : int = value
    elif weight_type == "running_var":
        lowercase_ : Any = value
    elif weight_type == "num_batches_tracked":
        lowercase_ : Union[str, Any] = value
    elif weight_type == "weight_ih_l0":
        lowercase_ : Dict = value
    elif weight_type == "weight_hh_l0":
        lowercase_ : Optional[int] = value
    elif weight_type == "bias_ih_l0":
        lowercase_ : Optional[int] = value
    elif weight_type == "bias_hh_l0":
        lowercase_ : Optional[int] = value
    elif weight_type == "weight_ih_l1":
        lowercase_ : Union[str, Any] = value
    elif weight_type == "weight_hh_l1":
        lowercase_ : Optional[Any] = value
    elif weight_type == "bias_ih_l1":
        lowercase_ : List[Any] = value
    elif weight_type == "bias_hh_l1":
        lowercase_ : Any = value
    else:
        # No specific attribute: write the tensor data directly onto the pointer.
        lowercase_ : Any = value
    logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def lowerCamelCase ( name : str , ignore_keys : list ) -> bool:
    """Return True if checkpoint key `name` matches any pattern in `ignore_keys`.

    Supported pattern forms:
      - ``"prefix.*"``   -> matches any name starting with ``"prefix."``
      - ``"a.*.b"``      -> matches any name containing both ``"a"`` and ``"b"``
      - plain substring  -> matches any name containing the pattern

    Fixes vs. the original block: the two parameters had the same (duplicate)
    name — a SyntaxError — and the ``.*.`` branch unpacked into two identical
    throwaway names; both are restored to the intended identifiers. The return
    annotation is corrected from ``str`` to ``bool``.
    """
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            # Prefix pattern: strip the trailing "*" and prefix-match.
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            # Infix wildcard: both sides must appear somewhere in the name.
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def lowerCamelCase ( orig_dict , hf_model , model_name ) -> None:
    """Copy every tensor from the original EnCodec state dict into `hf_model`.

    Keys are translated through the MAPPING_24K / MAPPING_48K tables above;
    unmatched keys are collected and reported at the end.

    Fixes vs. the original block:
      * duplicate parameter names (a SyntaxError) restored to
        (orig_dict, hf_model, model_name);
      * ``model_name == "encodec_24khz" or "encodec_32khz"`` was always truthy
        (a non-empty string literal), so the 48kHz branch and the error branch
        were unreachable — replaced with a proper membership test;
      * obfuscated throwaway assignments restored to real local names.
    """
    unused_weights = []
    # BUG FIX: membership test instead of `== "a" or "b"` (always true).
    if model_name in ("encodec_24khz", "encodec_32khz"):
        mapping = MAPPING_24K
    elif model_name == "encodec_48khz":
        mapping = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        # NOTE(review): the ignore list is the module-level list initialized to []
        # above — confirm its post-obfuscation name before running.
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in mapping.items():
            if "*" in key:
                # Wildcard source key: require both halves, then match on the suffix.
                prefix , suffix = key.split(""".*.""" )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    # Recover the layer index from the original key and splice it in.
                    layer_index = name.split(key )[0].split(""".""" )[-2]
                    mapped_key = mapped_key.replace("""*""" , layer_index )
                # Classify which attribute of the target module receives the tensor.
                if "weight_g" in name:
                    weight_type = """weight_g"""
                elif "weight_v" in name:
                    weight_type = """weight_v"""
                elif "weight_ih_l0" in name:
                    weight_type = """weight_ih_l0"""
                elif "weight_hh_l0" in name:
                    weight_type = """weight_hh_l0"""
                elif "bias_ih_l0" in name:
                    weight_type = """bias_ih_l0"""
                elif "bias_hh_l0" in name:
                    weight_type = """bias_hh_l0"""
                elif "weight_ih_l1" in name:
                    weight_type = """weight_ih_l1"""
                elif "weight_hh_l1" in name:
                    weight_type = """weight_hh_l1"""
                elif "bias_ih_l1" in name:
                    weight_type = """bias_ih_l1"""
                elif "bias_hh_l1" in name:
                    weight_type = """bias_hh_l1"""
                elif "bias" in name:
                    weight_type = """bias"""
                elif "weight" in name:
                    weight_type = """weight"""
                elif "running_mean" in name:
                    weight_type = """running_mean"""
                elif "running_var" in name:
                    weight_type = """running_var"""
                elif "num_batches_tracked" in name:
                    weight_type = """num_batches_tracked"""
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , ) -> str:
    # Convert an original EnCodec checkpoint into an HF EncodecModel + feature
    # extractor, save both, and optionally push them to the Hub.
    # NOTE(review): parameters were obfuscated into duplicates (upstream:
    # model_name, checkpoint_path, pytorch_dump_folder_path, config_path, repo_id),
    # and the per-model branch assignments were collapsed to throwaway names —
    # each line originally set a field on `config` (upsampling_ratios,
    # target_bandwidths, sampling_rate, audio_channels, normalize, ...).
    if config_path is not None:
        lowercase_ : Union[str, Any] = EncodecConfig.from_pretrained(UpperCAmelCase__ )
    else:
        lowercase_ : Optional[int] = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        lowercase_ : List[str] = [8, 5, 4, 4]
        lowercase_ : str = [2.2]
        lowercase_ : Optional[Any] = 64
        lowercase_ : int = 32000
        lowercase_ : int = 2048
        lowercase_ : List[Any] = False
        lowercase_ : Union[str, Any] = False
        lowercase_ : Tuple = False
    elif model_name == "encodec_48khz":
        lowercase_ : int = [8, 5, 4, 2]
        lowercase_ : str = [3.0, 6.0, 12.0, 24.0]
        lowercase_ : List[Any] = 48000
        lowercase_ : Tuple = 2
        lowercase_ : List[str] = False
        lowercase_ : int = """time_group_norm"""
        lowercase_ : str = True
        lowercase_ : List[str] = 1.0
        lowercase_ : str = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''' )
    lowercase_ : Tuple = EncodecModel(UpperCAmelCase__ )
    lowercase_ : Tuple = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(UpperCAmelCase__ )
    lowercase_ : Union[str, Any] = torch.load(UpperCAmelCase__ )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        lowercase_ : Any = original_checkpoint["""best_state"""]
    recursively_load_weights(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    model.save_pretrained(UpperCAmelCase__ )
    if repo_id:
        print("""Pushing to the hub...""" )
        feature_extractor.push_to_hub(UpperCAmelCase__ )
        model.push_to_hub(UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_lowercase : Any = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the Bloom model package: names are only imported
# when first accessed, via the _LazyModule installed at the bottom.
_lowercase : Dict = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
# Tokenizer entries are only registered when the `tokenizers` backend is present.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Dict = ["BloomTokenizerFast"]
# Modeling entries are only registered when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Dict = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
# Under static type checking, import everything eagerly so annotations resolve.
if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy over the structure above.
    import sys
    _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( nums : list[int] , target : int ) -> list[int]:
    """Two-pointer two-sum over a SORTED ascending list.

    Returns the pair of indices [i, j] (i < j) with nums[i] + nums[j] == target,
    or [] if no such pair exists. Assumes `nums` is sorted ascending — the
    pointer-narrowing argument is only valid on sorted input.

    Fixes vs. the original block: the two parameters shared one duplicate name
    (a SyntaxError), and the pointer advances were assigned to throwaway
    obfuscated names, so `i`/`j` never moved.
    """
    left = 0
    right = len(nums ) - 1
    while left < right:
        pair_sum = nums[left] + nums[right]
        if pair_sum == target:
            return [left, right]
        if pair_sum < target:
            # Sum too small: only moving the left pointer up can increase it.
            left += 1
        else:
            # Sum too large: only moving the right pointer down can decrease it.
            right -= 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase ( ) -> None:
    """Interactive Vigenère cipher driver.

    Prompts for a message, a key, and a mode ('e'/'d'), then prints the
    encrypted or decrypted result.

    Fixes vs. the original block: the mode string and the translated text were
    assigned to throwaway obfuscated names (so the final prints referenced
    undefined variables), and the cipher calls passed one variable twice —
    restored to pass (key, message).
    """
    message = input("""Enter message: """ )
    key = input("""Enter key [alphanumeric]: """ )
    mode = input("""Encrypt/Decrypt [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        mode = """encrypt"""
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        mode = """decrypt"""
        translated = decrypt_message(key , message )
    print(f'''\n{mode.title()}ed message:''' )
    print(translated )
def lowerCamelCase ( key : str , message : str ) -> str:
    """Encrypt `message` with Vigenère `key` (delegates to translate_message).

    Fix vs. the original block: the two parameters shared one duplicate name
    (a SyntaxError), so the message argument was unreachable.
    """
    return translate_message(key , message , """encrypt""" )
def lowerCamelCase ( key : str , message : str ) -> str:
    """Decrypt `message` with Vigenère `key` (delegates to translate_message).

    Fix vs. the original block: the two parameters shared one duplicate name
    (a SyntaxError), so the message argument was unreachable.
    """
    return translate_message(key , message , """decrypt""" )
def lowerCamelCase ( key : str , message : str , mode : str ) -> str:
    """Vigenère-translate `message` with `key` in the given `mode`.

    `mode` is "encrypt" or "decrypt". Letter case is preserved; non-letters
    pass through unchanged and do not advance the key position.

    Fixes vs. the original block: the three parameters shared one duplicate
    name (a SyntaxError), and — because of that collapse — the alphabet wrap
    used ``len(mode)`` instead of ``len(LETTERS)`` and the key wrap used
    ``len(mode)`` instead of ``len(key)``. The alphabet is bound locally from
    ``string.ascii_uppercase``, which equals the module constant.
    """
    from string import ascii_uppercase as LETTERS  # "ABC...Z", same as module constant

    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )  # BUG FIX: wrap modulo the alphabet size (26)
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):  # BUG FIX: wrap at the key length
                key_index = 0
        else:
            # Non-letter: emit unchanged without consuming a key character.
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
| 30
| 1
|
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_lowercase : Union[str, Any] = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_lowercase : Optional[int] = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_lowercase : str = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
    # Accuracy metric for the MATH (competition mathematics) dataset:
    # canonicalizes LaTeX answers via math_equivalence before comparing.
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Metric metadata: string predictions vs. string references.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" ),
                    """references""": datasets.Value("""string""" ),
                } ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Any , lowercase_ : int ):
        # Fraction of predictions judged equivalent to their reference.
        lowercase_ : List[str] = 0.0
        for i, j in zip(lowercase_ , lowercase_ ):
            n_correct += 1.0 if math_equivalence.is_equiv(lowercase_ , lowercase_ ) else 0.0
        lowercase_ : Union[str, Any] = n_correct / len(lowercase_ )
        return {
            "accuracy": accuracy,
        }
| 30
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
    # Unit tests for BarkProcessor: save/load round-trips, speaker-embedding
    # (voice preset) handling, and tokenizer parity.
    # NOTE(review): obfuscation collapsed the setUp assignments into throwaway
    # names; upstream they set self.checkpoint, self.tmpdirname, self.voice_preset,
    # self.input_string, self.speaker_embeddings_dict_path,
    # self.speaker_embeddings_directory — which later methods still reference.
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        lowercase_ : List[Any] = """ylacombe/bark-small"""
        lowercase_ : List[str] = tempfile.mkdtemp()
        lowercase_ : Tuple = """en_speaker_1"""
        lowercase_ : Union[str, Any] = """This is a test string"""
        lowercase_ : int = """speaker_embeddings_path.json"""
        lowercase_ : Any = """speaker_embeddings"""
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
        # Helper: fresh tokenizer for the test checkpoint.
        return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # tearDown: remove the temporary directory.
        shutil.rmtree(self.tmpdirname )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # save_pretrained -> from_pretrained round-trip preserves the tokenizer vocab.
        lowercase_ : Any = self.get_tokenizer()
        lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Round-trip with speaker embeddings and extra tokenizer kwargs.
        lowercase_ : Any = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Voice presets: accepted as an in-memory dict, an .npz file path, and a hub id.
        lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        lowercase_ : Optional[int] = 35
        lowercase_ : int = 2
        lowercase_ : Union[str, Any] = 8
        lowercase_ : Union[str, Any] = {
            """semantic_prompt""": np.ones(lowercase_ ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : Dict = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(lowercase_ , **lowercase_ )
        lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : List[Any] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Processor tokenization matches the bare tokenizer with max_length padding.
        lowercase_ : List[str] = self.get_tokenizer()
        lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
        lowercase_ : Any = processor(text=self.input_string )
        lowercase_ : List[str] = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 1
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __magic_name__ :
    # Helper that builds tiny FlaxGPTJ configs/inputs and checks the
    # cached (incremental) decoding path against full-sequence decoding.
    # NOTE(review): obfuscation collapsed constructor parameters and many local
    # assignments into duplicate names, which is not valid Python as written.
    def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : str=14 , lowercase_ : int=7 , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=False , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=99 , lowercase_ : Optional[int]=32 , lowercase_ : int=4 , lowercase_ : List[Any]=4 , lowercase_ : Tuple=4 , lowercase_ : Any=37 , lowercase_ : Dict="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=512 , lowercase_ : Tuple=0.02 , ):
        # Store the (tiny) model hyperparameters used by prepare_config_and_inputs.
        lowercase_ : Optional[int] = parent
        lowercase_ : List[Any] = batch_size
        lowercase_ : str = seq_length
        lowercase_ : List[str] = is_training
        lowercase_ : List[str] = use_input_mask
        lowercase_ : int = use_token_type_ids
        lowercase_ : str = use_labels
        lowercase_ : Dict = vocab_size
        lowercase_ : Optional[Any] = hidden_size
        lowercase_ : int = rotary_dim
        lowercase_ : str = num_hidden_layers
        lowercase_ : List[Any] = num_attention_heads
        lowercase_ : Union[str, Any] = intermediate_size
        lowercase_ : Optional[int] = hidden_act
        lowercase_ : List[Any] = hidden_dropout_prob
        lowercase_ : List[str] = attention_probs_dropout_prob
        lowercase_ : Any = max_position_embeddings
        lowercase_ : Tuple = initializer_range
        lowercase_ : Any = None
        # bos/eos/pad token ids all point at the last vocab entry.
        lowercase_ : Dict = vocab_size - 1
        lowercase_ : Tuple = vocab_size - 1
        lowercase_ : List[Any] = vocab_size - 1
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Build random input ids (and optionally a mask) plus a matching config.
        lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ : List[Any] = None
        if self.use_input_mask:
            lowercase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase_ : Dict = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Same as above but packaged as (config, inputs_dict) for common test mixins.
        lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ : Tuple = config_and_inputs
        lowercase_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[int] ):
        # Check that cached step-by-step decoding matches one-shot decoding.
        lowercase_ : str = 20
        lowercase_ : Optional[Any] = model_class_name(lowercase_ )
        lowercase_ : int = model.init_cache(input_ids.shape[0] , lowercase_ )
        lowercase_ : Union[str, Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
        lowercase_ : Optional[int] = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        # Run all but the last token through the cache, then the last token alone.
        lowercase_ : Tuple = model(
            input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , )
        lowercase_ : Optional[int] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
        lowercase_ : str = model(
            input_ids[:, -1:] , attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowercase_ , )
        lowercase_ : int = model(lowercase_ )
        # Last-position logits must agree to within 1e-3.
        lowercase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Optional[Any] ):
        # Same cache check, but with an explicit (padded) attention mask.
        lowercase_ : int = 20
        lowercase_ : List[Any] = model_class_name(lowercase_ )
        lowercase_ : Any = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        lowercase_ : int = model.init_cache(input_ids.shape[0] , lowercase_ )
        lowercase_ : int = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        lowercase_ : int = model(
            input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , )
        lowercase_ : Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
        lowercase_ : Optional[int] = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowercase_ , position_ids=lowercase_ , )
        lowercase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ )
        lowercase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
UpperCamelCase__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
    # setUp: create the shared tiny-model tester.
    # NOTE(review): obfuscation discards the result into a throwaway name;
    # upstream this sets self.model_tester, which later tests read.
    lowercase_ : Union[str, Any] = FlaxGPTJModelTester(self )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
    # Cached decoding must match full decoding for every model class.
    for model_class_name in self.all_model_classes:
        lowercase_ , lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
    # Same cache check, exercising the explicit attention-mask path.
    for model_class_name in self.all_model_classes:
        lowercase_ , lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_use_cache_forward_with_attn_mask(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ )
@tooslow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
    # Integration test: left-padded batch generation with the full GPT-J-6B
    # checkpoint must reproduce fixed expected continuations.
    lowercase_ : Tuple = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
    lowercase_ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowercase_ , truncation=lowercase_ )
    lowercase_ : List[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
    # NOTE(review): these two throwaway assignments originally set
    # model.config.do_sample = False and model.config.pad_token_id.
    lowercase_ : Optional[Any] = False
    lowercase_ : Optional[int] = model.config.eos_token_id
    lowercase_ : Union[str, Any] = jax.jit(model.generate )
    lowercase_ : Any = jit_generate(
        inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
    lowercase_ : List[Any] = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
    lowercase_ : List[str] = [
        """Hello this is a long string of text.\n\nI'm trying to get the text of the""",
        """Hey, I'm a little late to the party. I'm going to""",
    ]
    self.assertListEqual(lowercase_ , lowercase_ )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase_ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase_ : Union[str, Any] = getattr(lowercase_ , lowercase_ )
lowercase_ , lowercase_ : Optional[Any] = pt_inputs["""input_ids"""].shape
lowercase_ : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowercase_ ):
lowercase_ : List[str] = 0
lowercase_ : Optional[int] = 1
lowercase_ : str = 0
lowercase_ : List[str] = 1
lowercase_ : List[Any] = pt_model_class(lowercase_ ).eval()
lowercase_ : Optional[int] = model_class(lowercase_ , dtype=jnp.floataa )
lowercase_ : List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ )
lowercase_ : Dict = fx_state
with torch.no_grad():
lowercase_ : List[Any] = pt_model(**lowercase_ ).to_tuple()
lowercase_ : Union[str, Any] = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowercase_ , lowercase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase_ )
lowercase_ : Dict = model_class.from_pretrained(lowercase_ , from_pt=lowercase_ )
lowercase_ : Any = fx_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(
len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowercase_ , lowercase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase_ : Tuple = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase_ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase_ : Optional[Any] = getattr(lowercase_ , lowercase_ )
lowercase_ : Dict = pt_model_class(lowercase_ ).eval()
lowercase_ : Dict = model_class(lowercase_ , dtype=jnp.floataa )
lowercase_ : Any = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params )
lowercase_ , lowercase_ : Optional[Any] = pt_inputs["""input_ids"""].shape
lowercase_ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowercase_ ):
lowercase_ : Tuple = 0
lowercase_ : int = 1
lowercase_ : List[Any] = 0
lowercase_ : Any = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowercase_ : Tuple = pt_model(**lowercase_ ).to_tuple()
lowercase_ : str = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowercase_ , lowercase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase_ )
lowercase_ : Dict = pt_model_class.from_pretrained(lowercase_ , from_flax=lowercase_ )
with torch.no_grad():
lowercase_ : str = pt_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(
len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowercase_ , lowercase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def SCREAMING_SNAKE_CASE_ ( self : int ):
for model_class_name in self.all_model_classes:
lowercase_ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
lowercase_ : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase_ )
| 30
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class __magic_name__ ( _UpperCAmelCase):
    """Task template describing an image-classification dataset.

    NOTE(review): the obfuscated original declared every field as `UpperCamelCase__`
    (so each silently overwrote the previous one) and both members as
    `SCREAMING_SNAKE_CASE_`; the method body also referenced undefined names
    (`features`, `label_schema`, `task_template`).  Restored the canonical
    datasets `TaskTemplate` names, which the bodies already depend on.
    """

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        ClassLabel feature.

        Raises ValueError if `label_column` is absent or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names onto the canonical task columns."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 30
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(_UpperCAmelCase):
    """Configuration for the AltCLIP text tower (an XLM-R-style encoder with a
    projection head of size `project_dim`).

    NOTE(review): the obfuscated original declared every parameter as
    `lowercase_` (a SyntaxError — duplicate argument names) while the body read
    the canonical names; restored those names and defaults.
    """

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        # Special token ids are handled by the base PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(_UpperCAmelCase):
    """Configuration for the AltCLIP vision tower (a CLIP-style ViT).

    NOTE(review): restored the canonical parameter/attribute names that the
    obfuscated original destroyed (duplicate `lowercase_` parameters and
    undefined names in the body).
    """

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        """Build the vision config from a checkpoint, extracting the nested
        `vision_config` when the checkpoint is a full AltCLIP config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(_UpperCAmelCase):
    """Composite configuration holding an `AltCLIPTextConfig` and an
    `AltCLIPVisionConfig`.

    NOTE(review): restored the canonical names (parameters, locals, `model_type`
    / `is_composition` class attributes, and the `from_text_vision_configs` /
    `to_dict` method names) that the obfuscation collapsed into colliding
    placeholders; the original body already read these names.
    """

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Instantiate a composite config from the two sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, recursing into both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 30
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` regularization images matching `class_prompt`.

    Queries the LAION-400M knn index via clip-retrieval, saves the images under
    `<class_data_dir>/images/` and writes caption/url/path manifests alongside.
    Best-effort: unreachable or undecodable entries are skipped.

    NOTE(review): restored the local names (`client`, `num_images`, `count`,
    `total`, `pbar`, ...) and the function name that the `if __name__` guard
    below already calls; the obfuscated body referenced them while never
    binding them.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Nothing to do if a previous run already downloaded enough images.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until the index returns enough candidates (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validates that the payload is a decodable image before saving.
                    Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa2.write(images["url"] + "\n")
                    fa3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip bad/unreachable entries.
                continue
    return
def parse_args():
    """Parse the CLI arguments for the regularization-image retrieval script.

    NOTE(review): restored `parser`/boolean literals — the obfuscated body
    assigned the parser to a throwaway name and then read undefined `parser`,
    and the guard below called undefined `parse_args`/`args`.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import scaffolding for the speech-encoder-decoder subpackage.
# NOTE(review): the obfuscated original assigned everything to `_lowercase` and
# then passed undefined `_import_structure` to `_LazyModule` (and never
# installed the proxy in `sys.modules`); restored the standard pattern.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive).

    Multiply-and-surrender "slowsort": recursively sort both halves, move the
    maximum to `end`, then sort everything before it again.  Intentionally
    pessimal; for demonstration only.

    NOTE(review): the obfuscated original had three parameters all named
    `UpperCAmelCase__` (a SyntaxError) while the body and recursion read
    `sequence`/`start`/`end`/`slowsort`; restored those names.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # Put the larger of the two sub-maxima at the end.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 30
| 1
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line with the given padding side/length.

    Side effect: mutates `tokenizer.padding_side`.  BART tokenizers get
    `add_prefix_space=True` when the line does not already start with a space.

    NOTE(review): restored the parameter names (the obfuscated signature had
    duplicate `UpperCAmelCase__` parameters, a SyntaxError) and the
    `tokenizer.padding_side` assignment the body had lost; the name matches the
    `encode_line(...)` call sites in `Seq2SeqDataset.__getitem__`.
    """
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop columns of `input_ids` (and `attention_mask`) that are all padding.

    NOTE(review): restored parameter/local names — the obfuscated body read
    undefined `input_ids`/`keep_column_mask`; the name matches the
    `trim_batch(...)` call sites in `Seq2SeqDataset.collate_fn`.
    """
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __magic_name__ ( _UpperCAmelCase):
    """Line-aligned seq2seq dataset reading `<type_path>.source` / `<type_path>.target`.

    NOTE(review): obfuscation damage — every parameter/local is `lowercase_`
    while the bodies read the original names (`type_path`, `max_source_length`,
    `source_line`, `batch`, ...), which are therefore unbound.  Code left
    byte-identical; only comments added.
    """
    def __init__( self : List[str] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Any="train" , lowercase_ : List[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]="" , ):
        super().__init__()
        # Source/target files are resolved relative to the data directory.
        lowercase_ : Union[str, Any] = Path(lowercase_ ).joinpath(type_path + """.source""" )
        lowercase_ : Union[str, Any] = Path(lowercase_ ).joinpath(type_path + """.target""" )
        lowercase_ : Any = self.get_char_lens(self.src_file )
        lowercase_ : Tuple = max_source_length
        lowercase_ : str = max_target_length
        assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
        lowercase_ : Optional[Any] = tokenizer
        lowercase_ : str = prefix
        # n_obs truncates the dataset for quick runs.
        if n_obs is not None:
            lowercase_ : Any = self.src_lens[:n_obs]
        lowercase_ : Tuple = src_lang
        lowercase_ : int = tgt_lang
    def __len__( self : Optional[Any] ):
        return len(self.src_lens )
    def __getitem__( self : Optional[int] , lowercase_ : List[str] ):
        # Reads one (source, target) pair lazily via linecache.
        lowercase_ : List[str] = index + 1 # linecache starts at 1
        lowercase_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) , lowercase_ ).rstrip("""\n""" )
        lowercase_ : List[Any] = linecache.getline(str(self.tgt_file ) , lowercase_ ).rstrip("""\n""" )
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , lowercase_ ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers split into question_encoder (source) / generator (target).
        lowercase_ : Optional[Any] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase_ ) else self.tokenizer
        )
        lowercase_ : Tuple = self.tokenizer.generator if isinstance(self.tokenizer , lowercase_ ) else self.tokenizer
        lowercase_ : str = encode_line(lowercase_ , lowercase_ , self.max_source_length , """right""" )
        lowercase_ : Optional[int] = encode_line(lowercase_ , lowercase_ , self.max_target_length , """right""" )
        lowercase_ : Any = source_inputs["""input_ids"""].squeeze()
        lowercase_ : Dict = target_inputs["""input_ids"""].squeeze()
        lowercase_ : int = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def SCREAMING_SNAKE_CASE_ ( lowercase_ : int ):
        # get_char_lens: per-line character counts, used for filtering/truncation.
        return [len(lowercase_ ) for x in Path(lowercase_ ).open().readlines()]
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[str] ):
        # collate_fn: stack examples and trim shared padding columns.
        lowercase_ : Any = torch.stack([x["""input_ids"""] for x in batch] )
        lowercase_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
        lowercase_ : Any = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        # Target/source pad ids may differ for RAG-style dual tokenizers.
        lowercase_ : int = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , lowercase_ )
            else self.tokenizer.pad_token_id
        )
        lowercase_ : Union[str, Any] = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , lowercase_ )
            else self.tokenizer.pad_token_id
        )
        lowercase_ : Tuple = trim_batch(lowercase_ , lowercase_ )
        lowercase_ , lowercase_ : List[str] = trim_batch(lowercase_ , lowercase_ , attention_mask=lowercase_ )
        lowercase_ : str = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
# Module-level logger; `set_extra_model_params` below logs through this name
# (the obfuscated original bound it to `_lowercase`, leaving `logger` undefined).
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    NOTE(review): renamed from the shared obfuscated name `lowerCamelCase`,
    which every helper in this module collided on (later defs shadowed earlier
    ones).
    """
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Collect git repo metadata and write it to `<folder_path>/git_log.json`.

    NOTE(review): the obfuscated body saved the *folder path* instead of the
    collected repo info; restored `save_json(repo_infos, ...)`.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs) -> None:
    """Serialize `content` as JSON to `path` (default 4-space indent)."""
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Deserialize the JSON file at `path`."""
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Return id/sha/branch of the enclosing git repo plus the hostname.

    NOTE(review): restored `search_parent_directories=True` and the `repo`
    local — the obfuscated body passed/read undefined names.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """Like `map()` but eagerly materialized into a list."""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle `obj` to the file at `path`."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Normalize a QA answer: lowercase, strip punctuation/articles, squeeze whitespace.

    NOTE(review): restored inner-parameter names — the obfuscated inner
    functions read undefined `text`.
    """

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    """Token-level F1 between a predicted and a reference answer (SQuAD-style).

    Both strings are normalized via `normalize_answer` first.  Returns 0 when
    there is no token overlap.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction, ground_truth):
    """True iff the normalized prediction equals the normalized reference."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    """Mean exact-match over paired hypothesis/reference line lists.

    Returns {"em": fraction}; 0 for empty input.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    """True iff the model prefix designates a RAG model (starts with "rag")."""
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    """Move attributes listed in `extra_params` from `hparams` onto `config`.

    Unknown attributes are logged and dropped.  Returns the mutated
    (hparams, config) pair.

    NOTE(review): restored the `equivalent_param["dropout"] = "dropout_rate"`
    mapping the obfuscated body had lost (it assigned the string to a throwaway
    name), and the parameter/local names the body already read.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 30
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference on CPU with Intel Extension for PyTorch (IPEX).
# NOTE(review): the obfuscated script bound every result to `_lowercase` while
# subsequent lines read `parser`/`args`/`pipe`/`sample`/... — restored the
# names the script actually uses.
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex — a sample input lets IPEX trace the UNet; fall back to
# shape-agnostic optimization when tracing fails.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute — fixed seed for reproducible output.
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 30
| 1
|
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __magic_name__ :
    """Builds tiny MPNet configs/inputs and runs per-head output-shape checks.

    NOTE(review): this file has been through a name-mangling pass — every
    parameter is named ``lowercase_`` (duplicate argument names, a SyntaxError)
    and many right-hand sides reference names (``parent``, ``batch_size``, ...)
    that are no longer bound. Comments describe the apparent intent only.
    """

    def __init__( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=13 , lowercase_ : List[Any]=7 , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=False , lowercase_ : List[str]=True , lowercase_ : str=99 , lowercase_ : Any=64 , lowercase_ : List[Any]=5 , lowercase_ : List[Any]=4 , lowercase_ : List[Any]=64 , lowercase_ : Tuple="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : Any=0.1 , lowercase_ : Tuple=512 , lowercase_ : Any=16 , lowercase_ : Tuple=2 , lowercase_ : Tuple=0.02 , lowercase_ : Dict=3 , lowercase_ : Tuple=4 , lowercase_ : Optional[int]=None , ):
        # Presumably stores hyper-parameters of the miniature test model — the
        # assignments below bind throwaway locals; restore ``self.*`` targets.
        lowercase_ : str = parent
        lowercase_ : List[Any] = batch_size
        lowercase_ : str = seq_length
        lowercase_ : Optional[Any] = is_training
        lowercase_ : List[str] = use_input_mask
        lowercase_ : Optional[int] = use_token_type_ids
        lowercase_ : Any = use_labels
        lowercase_ : Any = vocab_size
        lowercase_ : Optional[int] = hidden_size
        lowercase_ : Tuple = num_hidden_layers
        lowercase_ : str = num_attention_heads
        lowercase_ : Tuple = intermediate_size
        lowercase_ : List[str] = hidden_act
        lowercase_ : Optional[Any] = hidden_dropout_prob
        lowercase_ : List[str] = attention_probs_dropout_prob
        lowercase_ : Tuple = max_position_embeddings
        lowercase_ : List[Any] = type_vocab_size
        lowercase_ : int = type_sequence_label_size
        lowercase_ : Any = initializer_range
        lowercase_ : int = num_labels
        lowercase_ : int = num_choices
        lowercase_ : List[str] = scope

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Config of the published checkpoint (kept for reference comparisons).
        return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # Builds random input ids, an optional attention mask, and optional labels.
        lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ : Optional[Any] = None
        if self.use_input_mask:
            lowercase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase_ : Tuple = None
        lowercase_ : List[str] = None
        lowercase_ : int = None
        if self.use_labels:
            lowercase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        lowercase_ : Union[str, Any] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Tiny-model config assembled from the tester's hyper-parameters.
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Any , lowercase_ : Union[str, Any] ):
        # Checks last_hidden_state and pooler_output shapes of the base model.
        lowercase_ : Optional[Any] = MPNetModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Optional[int] = model(lowercase_ , lowercase_ )
        lowercase_ : int = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Any ):
        # Question-answering head: start/end logits are per-token.
        lowercase_ : Union[str, Any] = MPNetForQuestionAnswering(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : List[str] = model(
            lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Any , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
        # Sequence-classification head: one logit vector per example.
        lowercase_ : Any = self.num_labels
        lowercase_ : Tuple = MPNetForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : List[str] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[Any] ):
        # Multiple-choice head: inputs are tiled across the choice dimension.
        lowercase_ : List[str] = self.num_choices
        lowercase_ : List[str] = MPNetForMultipleChoice(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowercase_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowercase_ : Optional[Any] = model(
            lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] ):
        # Token-classification head: one logit vector per token.
        lowercase_ : Union[str, Any] = self.num_labels
        lowercase_ : Any = MPNetForTokenClassification(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Repackages prepare_config_and_inputs() into the common (config, dict) form.
        lowercase_ : Optional[int] = self.prepare_config_and_inputs()
        ((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) : int = config_and_inputs
        lowercase_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    """Common ModelTester/PipelineTester suite run against all MPNet heads.

    NOTE(review): method bodies bind results to throwaway ``lowercase_`` locals
    and later read ``self.model_tester``/``self.config_tester`` — the mangling
    pass dropped the ``self.*`` assignment targets.
    """

    # All MPNet model classes exercised by the common tests (torch only).
    UpperCamelCase__ = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    # Mapping pipeline task -> model class for the pipeline tests.
    UpperCamelCase__ = (
        {
            '''feature-extraction''': MPNetModel,
            '''fill-mask''': MPNetForMaskedLM,
            '''question-answering''': MPNetForQuestionAnswering,
            '''text-classification''': MPNetForSequenceClassification,
            '''token-classification''': MPNetForTokenClassification,
            '''zero-shot''': MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ = False
    UpperCamelCase__ = True

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # Set-up: build the model tester and a ConfigTester for MPNetConfig.
        # NOTE(review): ``config_class=lowercase_`` looks mangled (was MPNetConfig).
        lowercase_ : List[Any] = MPNetModelTester(self )
        lowercase_ : Tuple = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Runs the shared config sanity checks.
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Base-model forward-shape check.
        lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # Sequence-classification head check.
        lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Multiple-choice head check.
        lowercase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Token-classification head check.
        lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Question-answering head check.
        lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ )
@require_torch
class __magic_name__ ( unittest.TestCase):
    """Slow integration test against the published microsoft/mpnet-base weights."""

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Run a fixed 11-token input through the pretrained model and compare a
        # 3x3 slice of the hidden states against recorded reference values.
        lowercase_ : List[str] = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
        lowercase_ : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        lowercase_ : List[Any] = model(lowercase_ )[0]
        # Hidden size of mpnet-base is 768.
        lowercase_ : Optional[Any] = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , lowercase_ )
        lowercase_ : List[Any] = torch.tensor(
            [[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
| 30
|
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Map of submodule name -> public symbols, consumed by _LazyModule below.
# Fix: the mangled original bound this dict to a throwaway name while the
# _LazyModule call referenced the (undefined) canonical ``_import_structure``.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: simply skip registering the modeling symbols.
    pass
else:
    # Fix: the model symbols were built as a bare list and never attached to
    # the import structure, so they could never be lazily imported.
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # Fix: the lazy module must replace this module in sys.modules; the
    # mangled original assigned it to a throwaway variable, disabling laziness.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class __magic_name__ ( unittest.TestCase):
def __init__( self : Union[str, Any] , lowercase_ : Any ):
lowercase_ : str = parent
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return {}
def lowerCamelCase ( ) -> list:
    """Return the two fixture HTML documents used by the feature-extractor tests.

    Fix: the mangled original bound both documents to throwaway locals and then
    returned the undefined name ``html_string_a`` twice; bind and return both.
    """
    html_string_1 = """<HTML>

    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>

    <BODY BGCOLOR=\"FFFFFF\">
    <HR>
    <a href=\"http://google.com\">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style=\"color:#0000FF\">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>

    <h1>My First Heading</h1>
    <p>My first paragraph.</p>

    </body>
    </html>
    """

    return [html_string_1, html_string_2]
@require_bsa
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """Tests MarkupLMFeatureExtractor node/xpath extraction on fixture HTML.

    NOTE(review): names look mangled — set-up binds the tester to a throwaway
    local but later code reads ``self.feature_extract_tester``.
    """

    # Feature-extractor class under test (None when beautifulsoup is absent).
    UpperCamelCase__ = MarkupLMFeatureExtractor if is_bsa_available() else None

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Set-up: build the helper tester.
        lowercase_ : List[Any] = MarkupLMFeatureExtractionTester(self )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Construction kwargs for the feature extractor (empty dict).
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Initialize feature_extractor
        lowercase_ : List[Any] = self.feature_extraction_class()

        # Test not batched input
        lowercase_ : Union[str, Any] = get_html_strings()[0]
        lowercase_ : Tuple = feature_extractor(lowercase_ )

        # Expected text nodes and their xpaths for the first fixture document.
        # fmt: off
        lowercase_ : List[Any] = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
        lowercase_ : Union[str, Any] = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
        # fmt: on
        self.assertEqual(encoding.nodes , lowercase_ )
        self.assertEqual(encoding.xpaths , lowercase_ )

        # Test batched
        lowercase_ : Tuple = get_html_strings()
        lowercase_ : Optional[Any] = feature_extractor(lowercase_ )

        # Second document adds one heading and one paragraph.
        # fmt: off
        lowercase_ : List[str] = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
        lowercase_ : int = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]

        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )

        self.assertEqual(encoding.nodes , lowercase_ )
        self.assertEqual(encoding.xpaths , lowercase_ )
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( mat_a: np.ndarray , mat_b: np.ndarray , mat_c: np.ndarray , pseudo_inv: np.ndarray | None = None , ) -> np.ndarray:
    """Return the Schur complement C - B^T A^{-1} B of the block matrix [[A, B], [B^T, C]].

    Fix: the mangled original declared four parameters with the same name (a
    SyntaxError) and referenced unbound locals; names are restored here.

    Args:
        mat_a: square block A.
        mat_b: off-diagonal block B (same number of rows as A).
        mat_c: block C (same number of columns as B).
        pseudo_inv: optional precomputed (pseudo-)inverse of A; when None, A is
            inverted with ``np.linalg.inv``.

    Raises:
        ValueError: on incompatible shapes, or when A is singular and no
            pseudo-inverse was supplied.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase):
    """Unit tests for the Schur complement helper.

    NOTE(review): results are bound to throwaway ``lowercase_`` locals and the
    calls reference ``schur_complement``, while the function above is named
    ``lowerCamelCase`` — identifiers were mangled; intent described below.
    """

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Checks the determinant identity det([[A,B],[B^T,C]]) == det(A)*det(S).
        lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
        lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
        lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
        lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
        lowercase_ : int = np.linalg.det(lowercase_ )
        lowercase_ : int = np.linalg.det(lowercase_ )
        self.assertAlmostEqual(lowercase_ , det_a * det_s )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # A 2x2 C against a 3x2 B: row mismatch must raise ValueError.
        lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(lowercase_ ):
            schur_complement(lowercase_ , lowercase_ , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # A 2x3 C against a 3x2 B: column mismatch must raise ValueError.
        lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(lowercase_ ):
            schur_complement(lowercase_ , lowercase_ , lowercase_ )
# When executed directly: run doctests first, then the unittest suite.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 30
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : List[Any] = logging.get_logger(__name__)
class __magic_name__ ( _UpperCAmelCase):
    """Image processor that can resize, center-crop, rescale and normalize images.

    Reconstructed from a name-mangled original in which every parameter was
    renamed to ``lowercase_`` (duplicate argument names, a SyntaxError), the
    five transform methods all shared one name, and locals referenced unbound
    names. Positional order and default values are preserved exactly; the
    parameter names themselves are a reconstruction — verify against callers.
    """

    # Model inputs produced by preprocess().
    UpperCamelCase__ = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Defaults: 256x256 resize, 224x224 crop, ImageNet mean/std.
        size = size if size is not None else {"""height""": 256, """width""": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image`` to ``size`` (dict with ``height`` and ``width``)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
        # Delegates to the module-level ``resize`` transform.
        return resize(
            image, size=(size["""height"""], size["""width"""]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop ``image`` to ``size`` (dict with ``height`` and ``width``)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured transform pipeline over one image or a batch.

        Per-call arguments override the instance defaults set in ``__init__``.
        Returns a ``BatchFeature`` with a ``pixel_values`` entry.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        # Bug fix: the original condition parsed as
        # ``(do_resize and size is None) or (resample is None)`` and could raise
        # even when resizing was disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Fix: the code below refers to the table by its conventional name, which the
# mangling pass had left undefined; bind it as well.
B64_CHARSET = _lowercase


def lowerCamelCase ( data: bytes ) -> bytes:
    """Encode a bytes object to standard Base64 (RFC 4648) with '=' padding.

    Fix: the mangled original bound its working values to throwaway locals and
    then referenced the unbound names ``data``/``binary_stream``/``padding``.

    Raises:
        TypeError: if ``data`` is not a bytes object.
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg)

    # One long string of bits, 8 per input byte.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to
        # make its length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def lowerCamelCase ( encoded_data ) -> bytes:
    """Decode standard Base64 data (str or bytes) back to bytes.

    Fix: the mangled original bound its working values to throwaway locals and
    referenced unbound names. The character table is defined locally so the
    function is self-contained and correct on its own.

    Raises:
        TypeError: if ``encoded_data`` is neither str nor bytes.
        ValueError: if a bytes input is not ASCII-decodable.
        AssertionError: on invalid Base64 characters or incorrect padding.
    """
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, str) and not isinstance(encoded_data, bytes):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in charset for char in encoded_data[:-padding]), "Invalid base64 character(s) found."
    else:
        assert all(
            char in charset for char in encoded_data), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one, and drop the padding bits.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(charset.index(char))[2:].zfill(6) for char in encoded_data)[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(charset.index(char))[2:].zfill(6) for char in encoded_data)

    decoded_bytes = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_bytes)
# When executed directly: run the module's doctests.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 30
| 1
|
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __magic_name__ ( _UpperCAmelCase):
    """Dataset over CNN/DailyMail story files in a directory.

    NOTE(review): identifiers are mangled — ``__init__`` binds the document
    list and directory listing to throwaway locals yet appends to
    ``self.documents``, and ``__getitem__`` calls ``process_story`` while the
    module-level function is named ``lowerCamelCase``. Restore these bindings
    before running.
    """

    def __init__( self : Optional[int] , lowercase_ : str="" , lowercase_ : Dict="train" ):
        # The first argument is presumably the data directory; files whose name
        # contains "summary" are skipped, everything else becomes a document.
        assert os.path.isdir(lowercase_ )
        lowercase_ : Tuple = []
        lowercase_ : str = os.listdir(lowercase_ )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            lowercase_ : int = os.path.join(lowercase_ , lowercase_ )
            if not os.path.isfile(lowercase_ ):
                continue
            self.documents.append(lowercase_ )

    def __len__( self : List[str] ):
        # Number of story documents discovered.
        return len(self.documents )

    def __getitem__( self : Dict , lowercase_ : Tuple ):
        # Reads the story file and splits it into article and summary lines.
        lowercase_ : List[Any] = self.documents[idx]
        lowercase_ : List[str] = document_path.split("""/""" )[-1]
        with open(lowercase_ , encoding="""utf-8""" ) as source:
            lowercase_ : List[Any] = source.read()
        lowercase_ , lowercase_ : Tuple = process_story(lowercase_ )
        return document_name, story_lines, summary_lines
def lowerCamelCase ( raw_story: str ) -> tuple:
    """Split a raw CNN/DailyMail story into (story_lines, summary_lines).

    Article lines are everything before the first "@highlight" marker; the
    summary lines are the non-marker lines after it. Empty lines are dropped
    and missing end-of-sentence periods are added via ``_add_missing_period``.

    Fix: the mangled original bound every intermediate value to throwaway
    locals and then read the unbound names back.
    """
    nonempty_lines = list(
        filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("""\n""")])
    )

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("""@highlight"""):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines (remaining lines, markers removed)
    summary_lines = list(filter(lambda t: not t.startswith("""@highlight"""), lines))
    return story_lines, summary_lines
def lowerCamelCase ( line: str ) -> str:
    """Append a period to ``line`` unless it already ends with end punctuation
    or is an "@highlight" marker.

    Fix: the mangled original bound the token list to a throwaway local and
    referenced the unbound name ``line``. Also guards the empty string, which
    previously raised IndexError on ``line[-1]``.
    """
    end_tokens = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""]
    if line.startswith("""@highlight"""):
        return line
    # Robustness: empty lines have no last character to inspect.
    if line and line[-1] in end_tokens:
        return line
    return line + "."
def lowerCamelCase ( sequence: list , block_size: int , pad_token_id: int ) -> list:
    """Truncate or pad ``sequence`` (a list of token ids) to exactly ``block_size``.

    Padding mutates and returns the input list (extended in place with
    ``pad_token_id``); truncation returns a new sliced list.

    Fix: the mangled original's parameters were all identically named, leaving
    ``sequence``/``block_size``/``pad_token_id`` unbound.
    """
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def lowerCamelCase ( sequence , pad_token_id ):
    """Return an attention mask for ``sequence``: 1 everywhere except 0 at
    positions equal to ``pad_token_id``.

    Fix: the mangled original bound the zero-write to a throwaway local, so
    the pad positions were never cleared in the mask.
    """
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def lowerCamelCase ( story_lines , summary_lines , tokenizer ):
    """Tokenize story and summary lines and flatten each into one id list.

    Fix: the mangled original's parameters were all identically named and the
    intermediate lists were bound to throwaway locals. Parameter order
    (story_lines, summary_lines, tokenizer) reconstructed from the body.
    """
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def lowerCamelCase ( batch , separator_token_id ):
    """Build alternating token-type ids: the segment index (mod 2) increments
    at every separator token; tokens before the first separator get -1 % 2 == 1
    in Python, so inputs are expected to start with a separator.

    Fix: the mangled original bound the per-sequence state to throwaway locals,
    so the embeddings were never accumulated.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 30
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
# CLI entry point: parse --version and update the doc-site JS.
# NOTE(review): mangled — the parser and parsed args are bound to ``_lowercase``
# but read back via the unbound names ``parser``/``args``, and
# ``update_custom_js`` is undefined (the function above is ``lowerCamelCase``).
if __name__ == "__main__":
    _lowercase : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    _lowercase : Dict = parser.parse_args()
    update_custom_js(args.version)
| 30
| 1
|
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( params , i , prefix , layer_name="attention" ):
    """Return the (k, o, q, v) attention kernels of T5X layer ``i``.

    Fix: the mangled original's parameters were all identically named, leaving
    ``params``/``prefix``/``i``/``layer_name`` unbound. Order reconstructed
    from the call sites: (params, i, "encoder"/"decoder", layer_name).
    """
    k = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
    o = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
    q = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
    v = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
    return k, o, q, v
def lowerCamelCase ( params , i , prefix , split_mlp_wi=False ):
    """Return the (wi, wo) MLP kernels of T5X layer ``i``.

    With ``split_mlp_wi`` (gated-GeLU v1.1 checkpoints), ``wi`` is the tuple
    (wi_0, wi_1); otherwise it is the single wi kernel.

    Fix: the mangled original's parameters were all identically named, leaving
    ``params``/``prefix``/``i`` unbound and the wi tuple built from unbound
    locals.
    """
    if split_mlp_wi:
        wi_0 = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
        wi_1 = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
        wi = (wi_0, wi_1)
    else:
        wi = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
    wo = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
    return wi, wo
def lowerCamelCase ( params , i , prefix , layer_name ):
    """Return the layer-norm scale of T5X layer ``i`` under ``layer_name``.

    Fix: the mangled original's parameters were all identically named, leaving
    ``params``/``prefix``/``i``/``layer_name`` unbound.
    """
    return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def lowerCamelCase ( UpperCAmelCase__ : dict , *, UpperCAmelCase__ : int , UpperCAmelCase__ : bool ) -> int:
    """Convert a flattened T5X variable tree into a PyTorch-style state-dict map.

    NOTE(review): heavily mangled — every destination key assignment has been
    replaced by a bind to the throwaway local ``lowercase_``, so the
    new-state-dict entries this function was building are lost; the lookup
    helpers are also called by names (``tax_attention_lookup`` etc.) that are
    undefined here (the functions above are all named ``lowerCamelCase``).
    The structure below still documents the intended per-layer traversal.
    """
    lowercase_ : List[Any] = traverse_util.flatten_dict(variables["""target"""] )
    lowercase_ : str = {"""/""".join(UpperCAmelCase__ ): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    lowercase_ : List[str] = """encoder/layers_0/mlp/wi_0/kernel""" in old
    print("""Split MLP:""" , UpperCAmelCase__ )

    lowercase_ : Optional[Any] = collections.OrderedDict()

    # Shared embeddings.
    lowercase_ : List[Any] = old["""token_embedder/embedding"""]

    # Encoder.
    for i in range(UpperCAmelCase__ ):
        # Block i, layer 0 (Self Attention).
        lowercase_ : List[Any] = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """encoder""" , """pre_attention_layer_norm""" )
        lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = tax_attention_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """encoder""" , """attention""" )
        lowercase_ : str = layer_norm
        # Kernels are transposed from JAX (in, out) to PyTorch (out, in) layout.
        lowercase_ : Optional[int] = k.T
        lowercase_ : Optional[Any] = o.T
        lowercase_ : Dict = q.T
        lowercase_ : int = v.T

        # Block i, layer 1 (MLP).
        lowercase_ : Any = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """encoder""" , """pre_mlp_layer_norm""" )
        lowercase_ , lowercase_ : str = tax_mlp_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """encoder""" , UpperCAmelCase__ )
        lowercase_ : Union[str, Any] = layer_norm
        if split_mlp_wi:
            lowercase_ : Tuple = wi[0].T
            lowercase_ : List[Any] = wi[1].T
        else:
            lowercase_ : List[Any] = wi.T
        lowercase_ : Union[str, Any] = wo.T
    lowercase_ : Dict = old[
        """encoder/relpos_bias/rel_embedding"""
    ].T
    lowercase_ : Optional[Any] = old["""encoder/encoder_norm/scale"""]

    if not is_encoder_only:
        # Decoder.
        for i in range(UpperCAmelCase__ ):
            # Block i, layer 0 (Self Attention).
            lowercase_ : Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """pre_self_attention_layer_norm""" )
            lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = tax_attention_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """self_attention""" )
            lowercase_ : List[str] = layer_norm
            lowercase_ : Optional[int] = k.T
            lowercase_ : Any = o.T
            lowercase_ : List[str] = q.T
            lowercase_ : int = v.T

            # Block i, layer 1 (Cross Attention).
            lowercase_ : str = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
            lowercase_ , lowercase_ , lowercase_ , lowercase_ : Dict = tax_attention_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """encoder_decoder_attention""" )
            lowercase_ : Dict = layer_norm
            lowercase_ : Union[str, Any] = k.T
            lowercase_ : Dict = o.T
            lowercase_ : str = q.T
            lowercase_ : Dict = v.T

            # Block i, layer 2 (MLP).
            lowercase_ : Dict = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """pre_mlp_layer_norm""" )
            lowercase_ , lowercase_ : List[str] = tax_mlp_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , UpperCAmelCase__ )
            lowercase_ : int = layer_norm
            if split_mlp_wi:
                lowercase_ : Union[str, Any] = wi[0].T
                lowercase_ : Union[str, Any] = wi[1].T
            else:
                lowercase_ : List[Any] = wi.T
            lowercase_ : int = wo.T
        lowercase_ : Dict = old["""decoder/decoder_norm/scale"""]
        lowercase_ : Dict = old[
            """decoder/relpos_bias/rel_embedding"""
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            lowercase_ : List[Any] = old["""decoder/logits_dense/kernel"""].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Turn converted numpy parameters into a PyTorch state dict, filling tied weights.

    The original definition had two parameters both named ``UpperCAmelCase__``
    (a SyntaxError) and assigned every local to ``lowercase_`` while the body
    read ``state_dict``; the name ``make_state_dict`` is what the caller below
    (``load_tax_weights_in_ta``) invokes.

    Args:
        converted_params: mapping of PyTorch parameter names to numpy arrays.
        is_encoder_only: True when the checkpoint holds only an encoder model.

    Returns:
        ``collections.OrderedDict`` mapping parameter names to torch tensors.
    """
    # Copy each array so the resulting tensors own their memory.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing: T5 ties the token embeddings to `shared.weight`.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint into the given PyTorch T5 model, in place.

    The original definition repeated the parameter name ``UpperCAmelCase__``
    four times (a SyntaxError); names are restored from the body and from the
    call site below, which invokes ``load_tax_weights_in_ta(model, config,
    tax_checkpoint_path, is_encoder_only)``.

    Args:
        model: the PyTorch T5 model to populate.
        config: model config; only ``config.num_layers`` is read here.
        tax_checkpoint_path: path of the T5X (flax) checkpoint.
        is_encoder_only: whether the checkpoint contains only an encoder.
    """
    # `checkpoints` comes from the t5x package imported at the top of this file.
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    # strict=True so any missing/unexpected parameter fails loudly.
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Convert a T5X checkpoint to a PyTorch checkpoint saved at ``pytorch_dump_path``.

    The original definition repeated the parameter name ``UpperCAmelCase__``
    four times (a SyntaxError); names are restored from the body and from the
    argparse-driven call in the ``__main__`` block below.
    """
    config = TaConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)
    # Load weights from the T5X checkpoint into the freshly built model.
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("""Done""")
if __name__ == "__main__":
    # CLI entry point. The original bound the parser and the parsed args to the
    # throwaway name `_lowercase` and then read the nonexistent attribute
    # `args.tax_checkpoint_path`; argparse derives the attribute name
    # `t5x_checkpoint_path` from the option string "--t5x_checkpoint_path".
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 30
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
    """Builds tiny Bit configurations/inputs and runs shape checks for the tests below.

    NOTE(review): identifier mangling renamed every method to
    ``SCREAMING_SNAKE_CASE_`` (colliding definitions — only the last one with a
    given name survives on the class) and every ``__init__`` parameter to
    ``lowercase_`` (duplicate parameter names are a SyntaxError), while bodies
    still reference the upstream names (``parent``, ``batch_size``, ...).
    Restore the upstream identifiers before running.
    """

    def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
        # Store the test hyper-parameters on the instance.
        lowercase_ : Any = parent
        lowercase_ : str = batch_size
        lowercase_ : Any = image_size
        lowercase_ : Optional[Any] = num_channels
        lowercase_ : Any = embeddings_size
        lowercase_ : Union[str, Any] = hidden_sizes
        lowercase_ : Any = depths
        lowercase_ : Dict = is_training
        lowercase_ : Tuple = use_labels
        lowercase_ : str = hidden_act
        lowercase_ : Optional[Any] = num_labels
        lowercase_ : Tuple = scope
        # Number of stages is derived from the depths list.
        lowercase_ : Any = len(lowercase_ )
        lowercase_ : Optional[Any] = out_features
        lowercase_ : Tuple = out_indices
        lowercase_ : str = num_groups

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # prepare_config_and_inputs: random pixel values (+ labels when enabled).
        lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase_ : List[Any] = None
        if self.use_labels:
            lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
        lowercase_ : int = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # get_config: build a BitConfig from the stored hyper-parameters.
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
        # create_and_check_model: last hidden state is (B, C_last, H/32, W/32).
        lowercase_ : Optional[int] = BitModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : List[Any] = model(lowercase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
        # create_and_check_for_image_classification: logits are (B, num_labels).
        lowercase_ : Union[str, Any] = self.num_labels
        lowercase_ : Tuple = BitForImageClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
        # create_and_check_backbone: feature maps/channels must follow
        # out_features, and out_features=None falls back to the last stage only.
        lowercase_ : Any = BitBackbone(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Dict = model(lowercase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        lowercase_ : List[str] = None
        lowercase_ : Dict = BitBackbone(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Tuple = model(lowercase_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # prepare_config_and_inputs_for_common: config plus the inputs dict.
        lowercase_ : Optional[int] = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
        lowercase_ : Any = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    """Model-level tests for Bit (common ModelTester/ConfigTester mixin suite).

    NOTE(review): every test method was mangled to ``SCREAMING_SNAKE_CASE_`` —
    at class-creation time later definitions shadow earlier ones, so unittest
    discovery cannot run them as written; the mixin base classes were mangled
    to ``_UpperCAmelCase``.  Restore the upstream names before relying on this
    suite.
    """

    # all_model_classes / pipeline_model_mapping, followed by the flags that
    # disable generic checks Bit does not support (attentions, embeddings, ...).
    UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    UpperCamelCase__ = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # setUp: build the model tester and a text-free config tester.
        lowercase_ : int = BitModelTester(self )
        lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # test_config: run the standard ConfigTester battery.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # create_and_test_config_common_properties: intentionally a no-op here.
        return

    @unittest.skip(reason="""Bit does not output attentions""" )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        pass

    @unittest.skip(reason="""Bit does not use inputs_embeds""" )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        pass

    @unittest.skip(reason="""Bit does not support input and output embeddings""" )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        pass

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # test_forward_signature: first positional arg must be pixel_values.
        lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Optional[Any] = model_class(lowercase_ )
            lowercase_ : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
            lowercase_ : Optional[int] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        lowercase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # test_initialization: norm layers should start with weight==1, bias==0.
        # NOTE(review): ``nn.BatchNormad`` does not exist in torch.nn — this is
        # a mangled ``nn.BatchNorm2d`` and raises AttributeError as written.
        lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : List[Any] = model_class(config=lowercase_ )
            for name, module in model.named_modules():
                if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        # test_hidden_states_output: hidden-state count and spatial shape are
        # checked for both layer types, with the flag passed directly and via
        # the config.
        def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
            lowercase_ : Optional[Any] = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
            lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase_ : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase_ : Union[str, Any] = layer_type
                lowercase_ : Optional[Any] = True
                check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase_ : Union[str, Any] = True
                check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )

    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        pass

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # test_model_from_pretrained: load the first released checkpoint.
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
    """Load the COCO sample image used by the slow integration tests.

    The original bound the opened image to the throwaway name ``lowercase_``
    and then returned the undefined name ``image`` (NameError); bind it
    properly instead.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Slow integration test: run the released Bit classifier on a sample image."""

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # default_image_processor (None when the vision extras are unavailable).
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # test_inference_image_classification_head: checks the logits shape and
        # the first three logit values against recorded reference numbers.
        lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
        lowercase_ : int = self.default_image_processor
        lowercase_ : List[Any] = prepare_img()
        lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            lowercase_ : str = model(**lowercase_ )
        # verify the logits
        lowercase_ : Optional[int] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowercase_ )
        lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """Backbone-specific test suite for Bit (BackboneTesterMixin battery)."""

    # Backbone classes / config class / has_attentions flag (upstream names:
    # all_model_classes, config_class, has_attentions — mangled here).
    UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
    UpperCamelCase__ = BitConfig
    UpperCamelCase__ = False

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # setUp: reuse the shared BitModelTester helper.
        lowercase_ : Union[str, Any] = BitModelTester(self )
| 30
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): mangling assigned several distinct module constants to the same
# name `_lowercase` (logger, SPIECE_UNDERLINE, vocab-file names, URL map,
# positional-embedding sizes); each assignment shadows the previous one, while
# the tokenizer class below still reads the upstream names (`logger`,
# `VOCAB_FILES_NAMES`, ...) — restore the upstream names before use.
_lowercase : Union[str, Any] = logging.get_logger(__name__)
# SentencePiece's word-boundary marker character.
_lowercase : Union[str, Any] = "▁"
_lowercase : str = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowercase : Tuple = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}
# Maximum model input sizes, per checkpoint.
_lowercase : Optional[int] = {"vinai/bartpho-syllable": 1024}
class __magic_name__ ( _UpperCAmelCase):
    """BARTpho (syllable) tokenizer: a SentencePiece model paired with a reduced
    fairseq-style monolingual vocabulary.

    NOTE(review): identifier mangling broke several references in this class —
    the class attributes read module constants that were all renamed to
    ``_lowercase`` (so ``VOCAB_FILES_NAMES`` etc. are undefined), ``logger`` is
    undefined, every public method is named ``SCREAMING_SNAKE_CASE_`` (later
    definitions shadow earlier ones), and ``__init__`` repeats the parameter
    name ``lowercase_``.  Restore the upstream names before use.
    """

    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ = ['''input_ids''', '''attention_mask''']

    def __init__( self : List[Any] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : List[Any]="<s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : Optional[Any]="<s>" , lowercase_ : int="<unk>" , lowercase_ : Optional[int]="<pad>" , lowercase_ : List[Any]="<mask>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : int , ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase_ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
        lowercase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
        lowercase_ : Tuple = vocab_file
        lowercase_ : Any = monolingual_vocab_file
        # Load the full SentencePiece model from disk.
        lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(lowercase_ ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        lowercase_ : Dict = {}
        lowercase_ : Optional[int] = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(lowercase_ ) not in self.fairseq_tokens_to_ids:
                lowercase_ : List[Any] = cnt
                cnt += 1
        # Each line of the monolingual dict contributes one token id.
        with open(lowercase_ , """r""" , encoding="""utf-8""" ) as f:
            for line in f.readlines():
                lowercase_ : int = line.strip().split()[0]
                lowercase_ : Dict = len(self.fairseq_tokens_to_ids )
        if str(lowercase_ ) not in self.fairseq_tokens_to_ids:
            lowercase_ : Optional[Any] = len(self.fairseq_tokens_to_ids )
        # Reverse mapping: id -> token.
        lowercase_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self : str ):
        # Drop the unpicklable SentencePiece processor; keep its serialized proto.
        lowercase_ : List[str] = self.__dict__.copy()
        lowercase_ : Optional[Any] = None
        lowercase_ : str = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : Optional[int] , lowercase_ : List[str] ):
        lowercase_ : int = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            lowercase_ : int = {}
        # Rebuild the SentencePiece processor from the serialized proto.
        lowercase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
        # build_inputs_with_special_tokens: <s> A </s> (or <s> A </s></s> B </s>).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase_ : Optional[int] = [self.cls_token_id]
        lowercase_ : str = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ):
        # get_special_tokens_mask: 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
        if token_ids_a is None:
            return [1] + ([0] * len(lowercase_ )) + [1]
        return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
        # create_token_type_ids_from_sequences: BARTpho uses all-zero token types.
        lowercase_ : Any = [self.sep_token_id]
        lowercase_ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # vocab_size: size of the reduced fairseq-style vocabulary.
        return len(self.fairseq_ids_to_tokens )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # get_vocab: token -> id mapping, including added tokens.
        lowercase_ : Dict = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
        # _tokenize: delegate to SentencePiece.
        return self.sp_model.encode(lowercase_ , out_type=lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[Any] ):
        # _convert_token_to_id: unknown tokens map to unk_token_id.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : str ):
        # _convert_id_to_token via the reverse mapping built in __init__.
        return self.fairseq_ids_to_tokens[index]

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Tuple ):
        # convert_tokens_to_string: join pieces and map "▁" back to spaces.
        lowercase_ : Union[str, Any] = """""".join(lowercase_ ).replace(lowercase_ , """ """ ).strip()
        return out_string

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
        # save_vocabulary: copy (or re-serialize) the SentencePiece model and
        # write the monolingual dict, skipping the special tokens.
        if not os.path.isdir(lowercase_ ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowercase_ : Optional[int] = os.path.join(
            lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        lowercase_ : str = os.path.join(
            lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowercase_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase_ , """wb""" ) as fi:
                lowercase_ : Optional[Any] = self.sp_model.serialized_model_proto()
                fi.write(lowercase_ )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            lowercase_ ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , lowercase_ )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(lowercase_ , """w""" , encoding="""utf-8""" ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(lowercase_ )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
| 30
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Prediction summaries used as ROUGE inputs by the tests below.
# NOTE(review): both fixture lists were mangled to the same name `_lowercase`
# (the second assignment shadows the first), and the test functions below no
# longer reference either — restore the upstream fixture names.
_lowercase : Optional[Any] = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]
# Target (reference) summaries paired with the predictions above.
_lowercase : List[Any] = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
    """Disaggregated ROUGE: the per-example rouge2 F-measure mean must not
    depend on which other keys were requested alongside it.

    NOTE(review): the fixture arguments were mangled to the undefined name
    ``UpperCAmelCase__`` and locals to ``lowercase_`` — the names read by the
    assertions (``no_aggregation``, ``no_aggregation_just_ra``) are undefined
    as written; restore the upstream identifiers.
    """
    lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
    assert (
        pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
    )
def lowerCamelCase ( ) -> Optional[Any]:
    """rougeLsum with newline separation should score higher than without.

    NOTE(review): mangling left ``k``/``score``/``score_no_sep`` and the
    fixture name ``UpperCAmelCase__`` undefined; restore upstream identifiers.
    """
    lowercase_ : Tuple = """rougeLsum"""
    lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
    """rouge1/rouge2/rougeL must be unaffected by the newline_sep flag.

    NOTE(review): fixture/flag arguments were mangled to the undefined
    ``UpperCAmelCase__``; restore upstream identifiers.
    """
    lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
    lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
    """With single-sentence inputs, newline separation must not change scores.

    NOTE(review): the final comparison's positional arguments were mangled to
    the undefined ``UpperCAmelCase__``; upstream passes the two local lists.
    """
    lowercase_ : Union[str, Any] = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    lowercase_ : List[str] = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
    """rougeLsum without newline separation must beat the newline-separated
    score on this "<n>"-delimited Pegasus-style example.

    NOTE(review): fixture/score identifiers were mangled; ``new_score`` and
    ``prev_score`` are undefined as written — restore upstream names.
    """
    lowercase_ : Optional[Any] = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    lowercase_ : List[Any] = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
    lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
    assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
    """calculate_rouge_path on the WMT en-ro test files returns the expected
    container type, both aggregated and non-aggregated.

    NOTE(review): the isinstance arguments were mangled to the undefined name
    ``UpperCAmelCase__``; restore upstream identifiers.
    """
    lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
    lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    lowercase_ : Union[str, Any] = calculate_rouge_path(
        data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 30
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (upstream name: `logger`; mangled to `_lowercase` here).
_lowercase : Tuple = logging.get_logger(__name__)
# Pretrained-config archive map — intentionally empty for MEGATRON-BERT.
_lowercase : Union[str, Any] = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __magic_name__ ( PretrainedConfig):
    """Configuration class for MEGATRON-BERT models.

    The original ``__init__`` repeated the parameter name ``lowercase_``
    sixteen times (a SyntaxError) and the base class was the undefined
    ``_UpperCAmelCase``; parameter names are restored from the assignment
    order in the body and the base from the ``PretrainedConfig`` import at the
    top of this file.
    """

    # NOTE(review): upstream names this attribute ``model_type``; the mangled
    # name is kept unchanged to avoid touching anything that may read it.
    UpperCamelCase__ = '''megatron-bert'''

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 30
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (upstream name: `logger`; mangled to `_lowercase` here).
_lowercase : Optional[Any] = logging.get_logger(__name__)
# Map of pretrained Speech2Text checkpoints to their config file URLs.
_lowercase : Union[str, Any] = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( PretrainedConfig):
    """Configuration class for Speech2Text encoder-decoder models.

    The original ``__init__`` repeated the parameter name ``lowercase_``
    (a SyntaxError) and the base class was the undefined ``_UpperCAmelCase``;
    parameter names are restored from the assignment order in the body (which
    matches the original defaults one-to-one) and the base from the
    ``PretrainedConfig`` import at the top of this file.
    """

    # NOTE(review): upstream these three class attributes are model_type,
    # keys_to_ignore_at_inference and attribute_map; the mangled (colliding)
    # names are reproduced unchanged to avoid widening this fix.
    UpperCamelCase__ = '''speech_to_text'''
    UpperCamelCase__ = ['''past_key_values''']
    UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        """Build the config; validates the convolutional front-end settings."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        # One kernel size is required per convolutional layer.
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
                f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
                f'''`config.num_conv_layers = {self.num_conv_layers}`.'''
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import structure for the TrOCR sub-package: heavy modules are imported
# only when their attributes are actually accessed.
# NOTE(review): mangling renamed the `_import_structure` dict (and the
# torch-only modeling list that upstream appends to it) to `_lowercase`, while
# the `_LazyModule` call at the bottom still reads `_import_structure` —
# restore the upstream names before use.
_lowercase : str = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _lowercase : Optional[Any] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys
    # At runtime, replace this module with a lazy proxy.
    _lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
    """Builds tiny TimeSformer configurations and dummy video inputs for the tests below.

    NOTE(review): identifiers in this file look machine-scrambled.  Every
    constructor parameter is named ``lowercase_`` while the body reads names
    such as ``parent`` and ``batch_size`` that are never bound, so ``__init__``
    raises NameError as written.  Judging by its use further down
    (``TimesformerModelTester(self)``), the intended class name is presumably
    ``TimesformerModelTester`` — confirm against the upstream test file.
    """

    # Stores the test hyper-parameters and derives the patch/sequence counts.
    def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
        # NOTE(review): the right-hand names below are unbound — they were
        # presumably the original parameter names before scrambling.
        lowercase_ : int = parent
        lowercase_ : str = batch_size
        lowercase_ : List[str] = image_size
        lowercase_ : str = num_channels
        lowercase_ : List[Any] = patch_size
        lowercase_ : Optional[Any] = num_frames
        lowercase_ : Dict = is_training
        lowercase_ : int = use_labels
        lowercase_ : List[str] = hidden_size
        lowercase_ : Dict = num_hidden_layers
        lowercase_ : Dict = num_attention_heads
        lowercase_ : Any = intermediate_size
        lowercase_ : Optional[int] = hidden_act
        lowercase_ : Optional[Any] = hidden_dropout_prob
        lowercase_ : List[Any] = attention_probs_dropout_prob
        lowercase_ : Any = attention_type
        lowercase_ : Union[str, Any] = initializer_range
        lowercase_ : List[str] = scope
        lowercase_ : Optional[int] = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        lowercase_ : Dict = (image_size // patch_size) ** 2
        lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1

    # Returns (config, pixel_values, labels): a random video tensor of shape
    # (batch, frames, channels, H, W) plus optional classification labels.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        lowercase_ : Optional[Any] = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        lowercase_ : int = None
        if self.use_labels:
            lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
        lowercase_ : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    # Builds a TimesformerConfig from the stored hyper-parameters.
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        lowercase_ : int = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        lowercase_ : Any = self.num_labels
        return config

    # Runs the bare TimesformerModel and checks the last_hidden_state shape.
    def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
        lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : int = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Runs the video-classification head and checks the logits shape.
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
        lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : int = model(lowercase_ )
        # verify the logits shape
        lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , lowercase_ )

    # Adapts prepare_config_and_inputs() output for the common-test mixin.
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ : List[str] = self.prepare_config_and_inputs()
        # NOTE(review): `config_and_inputs` is unbound here — presumably the
        # result of the call above was meant to be unpacked.
        lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
        lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    """TimeSformer model test suite (common ModelTester/Pipeline mixin harness).

    NOTE(review): identifiers are machine-scrambled — the class attributes are
    all named ``UpperCamelCase__`` (later assignments shadow earlier ones) and
    method bodies read names that are never bound, so these tests cannot run
    as written; the structure mirrors the upstream Timesformer test file.
    """

    # all_model_classes / pipeline mapping for the common-test mixins
    UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    UpperCamelCase__ = (
        {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    # feature flags consumed by the common-test mixins (all disabled)
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    # setUp: instantiate the model tester and the config tester.
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        lowercase_ : Any = TimesformerModelTester(self )
        lowercase_ : Union[str, Any] = ConfigTester(
            self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )

    # _prepare_for_class: optionally add zeroed labels for classification models.
    def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
        lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
        if return_labels:
            if model_class in get_values(lowercase_ ):
                lowercase_ : List[str] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
        return inputs_dict

    # Runs the shared config sanity checks.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        pass

    # Input embeddings are a module; output embeddings are Linear or None.
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : str = model_class(lowercase_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowercase_ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )

    # forward() signature must start with `pixel_values`.
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Dict = model_class(lowercase_ )
            lowercase_ : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
            lowercase_ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase_ )

    # Shape check for the bare model.
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    # Shape check for the video-classification head.
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        lowercase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*lowercase_ )

    # Smoke test: load the first pretrained checkpoint from the hub.
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )

    # Attention-output test: checks count and shape of returned attentions when
    # enabled via kwargs, via config, and combined with hidden states.
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        if not self.has_attentions:
            pass
        else:
            lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
            lowercase_ : List[str] = True
            for model_class in self.all_model_classes:
                lowercase_ : str = self.model_tester.seq_length
                lowercase_ : int = self.model_tester.num_frames
                lowercase_ : int = True
                lowercase_ : Any = False
                lowercase_ : str = True
                lowercase_ : int = model_class(lowercase_ )
                model.to(lowercase_ )
                model.eval()
                with torch.no_grad():
                    lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
                lowercase_ : List[str] = outputs.attentions
                self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                lowercase_ : List[str] = True
                lowercase_ : str = model_class(lowercase_ )
                model.to(lowercase_ )
                model.eval()
                with torch.no_grad():
                    lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
                lowercase_ : int = outputs.attentions
                self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                lowercase_ : Optional[Any] = len(lowercase_ )
                # Check attention is always last and order is fine
                lowercase_ : Tuple = True
                lowercase_ : Dict = True
                lowercase_ : str = model_class(lowercase_ )
                model.to(lowercase_ )
                model.eval()
                with torch.no_grad():
                    lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
                self.assertEqual(out_len + 1 , len(lowercase_ ) )
                lowercase_ : Optional[Any] = outputs.attentions
                self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )

    # Hidden-states test: num_hidden_layers + 1 states, each (seq_len, hidden).
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
            lowercase_ : List[str] = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
            lowercase_ : Dict = outputs.hidden_states
            lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(lowercase_ ) , lowercase_ )
            lowercase_ : List[Any] = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : List[str] = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase_ : Optional[int] = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> list:
    """Download the 'eating spaghetti' sample clip and return it as a list of frames.

    Returns:
        list: frames obtained by iterating the array stored in the ``.npy`` file.
    """
    # FIX: the original passed the undefined name ``UpperCAmelCase__`` to
    # ``np.load`` and ``list``; the downloaded file path and the loaded array
    # must be used instead.  Return annotation corrected from Optional[int].
    video_path = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(video_path )
    return list(video )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Integration test: run a pretrained TimeSformer checkpoint on a real clip
    and compare the first logits against hard-coded reference values.
    """

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )

    # End-to-end inference on the Kinetics-400 fine-tuned checkpoint.
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
            lowercase_ )
        lowercase_ : Optional[Any] = self.default_image_processor
        lowercase_ : Any = prepare_video()
        # the model consumes 8 frames
        lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            lowercase_ : Optional[Any] = model(**lowercase_ )
        # verify the logits
        lowercase_ : Any = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , lowercase_ )
        lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 30
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
    """Builds tiny TimeSformer configurations and dummy video inputs (duplicate copy).

    NOTE(review): identifiers are machine-scrambled — all parameters are named
    ``lowercase_`` while the body reads never-bound names (``parent``,
    ``batch_size``, ...), so ``__init__`` raises NameError as written.  This
    block is a byte-identical duplicate of the tester class earlier in the file.
    """

    # Stores the test hyper-parameters and derives the patch/sequence counts.
    def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
        lowercase_ : int = parent
        lowercase_ : str = batch_size
        lowercase_ : List[str] = image_size
        lowercase_ : str = num_channels
        lowercase_ : List[Any] = patch_size
        lowercase_ : Optional[Any] = num_frames
        lowercase_ : Dict = is_training
        lowercase_ : int = use_labels
        lowercase_ : List[str] = hidden_size
        lowercase_ : Dict = num_hidden_layers
        lowercase_ : Dict = num_attention_heads
        lowercase_ : Any = intermediate_size
        lowercase_ : Optional[int] = hidden_act
        lowercase_ : Optional[Any] = hidden_dropout_prob
        lowercase_ : List[Any] = attention_probs_dropout_prob
        lowercase_ : Any = attention_type
        lowercase_ : Union[str, Any] = initializer_range
        lowercase_ : List[str] = scope
        lowercase_ : Optional[int] = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        lowercase_ : Dict = (image_size // patch_size) ** 2
        lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1

    # Returns (config, pixel_values, labels) with a random video tensor.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        lowercase_ : Optional[Any] = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        lowercase_ : int = None
        if self.use_labels:
            lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
        lowercase_ : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    # Builds a TimesformerConfig from the stored hyper-parameters.
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        lowercase_ : int = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        lowercase_ : Any = self.num_labels
        return config

    # Runs the bare TimesformerModel and checks the last_hidden_state shape.
    def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
        lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : int = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Runs the video-classification head and checks the logits shape.
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
        lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : int = model(lowercase_ )
        # verify the logits shape
        lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , lowercase_ )

    # Adapts prepare_config_and_inputs() output for the common-test mixin.
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ : List[str] = self.prepare_config_and_inputs()
        # NOTE(review): `config_and_inputs` is unbound — the call result above
        # was presumably meant to be unpacked here.
        lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
        lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    """TimeSformer model test suite (duplicate copy of the earlier class).

    NOTE(review): identifiers are machine-scrambled — the class attributes are
    all named ``UpperCamelCase__`` (later assignments shadow earlier ones) and
    method bodies read names that are never bound, so these tests cannot run
    as written.
    """

    # all_model_classes / pipeline mapping for the common-test mixins
    UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    UpperCamelCase__ = (
        {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    # feature flags consumed by the common-test mixins (all disabled)
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    # setUp: instantiate the model tester and the config tester.
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        lowercase_ : Any = TimesformerModelTester(self )
        lowercase_ : Union[str, Any] = ConfigTester(
            self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )

    # _prepare_for_class: optionally add zeroed labels for classification models.
    def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
        lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
        if return_labels:
            if model_class in get_values(lowercase_ ):
                lowercase_ : List[str] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
        return inputs_dict

    # Runs the shared config sanity checks.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        pass

    # Input embeddings are a module; output embeddings are Linear or None.
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : str = model_class(lowercase_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowercase_ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )

    # forward() signature must start with `pixel_values`.
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Dict = model_class(lowercase_ )
            lowercase_ : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
            lowercase_ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase_ )

    # Shape check for the bare model.
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    # Shape check for the video-classification head.
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        lowercase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*lowercase_ )

    # Smoke test: load the first pretrained checkpoint from the hub.
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )

    # Attention-output test: checks count and shape of returned attentions when
    # enabled via kwargs, via config, and combined with hidden states.
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        if not self.has_attentions:
            pass
        else:
            lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
            lowercase_ : List[str] = True
            for model_class in self.all_model_classes:
                lowercase_ : str = self.model_tester.seq_length
                lowercase_ : int = self.model_tester.num_frames
                lowercase_ : int = True
                lowercase_ : Any = False
                lowercase_ : str = True
                lowercase_ : int = model_class(lowercase_ )
                model.to(lowercase_ )
                model.eval()
                with torch.no_grad():
                    lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
                lowercase_ : List[str] = outputs.attentions
                self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                lowercase_ : List[str] = True
                lowercase_ : str = model_class(lowercase_ )
                model.to(lowercase_ )
                model.eval()
                with torch.no_grad():
                    lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
                lowercase_ : int = outputs.attentions
                self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                lowercase_ : Optional[Any] = len(lowercase_ )
                # Check attention is always last and order is fine
                lowercase_ : Tuple = True
                lowercase_ : Dict = True
                lowercase_ : str = model_class(lowercase_ )
                model.to(lowercase_ )
                model.eval()
                with torch.no_grad():
                    lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
                self.assertEqual(out_len + 1 , len(lowercase_ ) )
                lowercase_ : Optional[Any] = outputs.attentions
                self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )

    # Hidden-states test: num_hidden_layers + 1 states, each (seq_len, hidden).
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
            lowercase_ : List[str] = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
            lowercase_ : Dict = outputs.hidden_states
            lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(lowercase_ ) , lowercase_ )
            lowercase_ : List[Any] = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : List[str] = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase_ : Optional[int] = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> list:
    """Download the 'eating spaghetti' sample clip and return it as a list of frames.

    Returns:
        list: frames obtained by iterating the array stored in the ``.npy`` file.
    """
    # FIX: the original passed the undefined name ``UpperCAmelCase__`` to
    # ``np.load`` and ``list``; the downloaded file path and the loaded array
    # must be used instead.  Return annotation corrected from Optional[int].
    video_path = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(video_path )
    return list(video )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Integration test (duplicate copy): run a pretrained TimeSformer checkpoint
    on a real clip and compare the first logits against hard-coded references.
    """

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )

    # End-to-end inference on the Kinetics-400 fine-tuned checkpoint.
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
            lowercase_ )
        lowercase_ : Optional[Any] = self.default_image_processor
        lowercase_ : Any = prepare_video()
        # the model consumes 8 frames
        lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            lowercase_ : Optional[Any] = model(**lowercase_ )
        # verify the logits
        lowercase_ : Any = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , lowercase_ )
        lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 30
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
# Module-level constants for the RegNet modeling file.
# NOTE(review): scrambling renamed every constant to `_lowercase`, so each
# assignment below clobbers the previous one; upstream these are distinct
# names (_CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC, _EXPECTED_OUTPUT_SHAPE, ...).
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
    """Convolution -> batch norm -> activation block.

    NOTE(review): names look machine-scrambled — ``nn.Convad``/``nn.BatchNormad``
    are presumably ``nn.Conv2d``/``nn.BatchNorm2d`` with the digit stripped, and
    the constructor assigns to throwaway ``lowercase_`` names while ``forward``
    reads ``self.convolution``/``self.normalization``/``self.activation``.
    """

    def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
        super().__init__()
        # same-padding convolution (padding = kernel_size // 2)
        lowercase_ : List[Any] = nn.Convad(
            lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
        lowercase_ : str = nn.BatchNormad(lowercase_ )
        # activation looked up from the ACT2FN-style table; identity when None
        lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()

    # conv -> norm -> activation
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
        lowercase_ : Dict = self.convolution(lowercase_ )
        lowercase_ : str = self.normalization(lowercase_ )
        lowercase_ : Optional[Any] = self.activation(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """RegNet stem: a stride-2 3x3 conv layer that embeds the input pixels.

    NOTE(review): scrambled constructor assigns to throwaway names; ``forward``
    reads ``self.embedder``/``self.num_channels`` that were presumably bound.
    """

    def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
        super().__init__()
        lowercase_ : str = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        lowercase_ : Any = config.num_channels

    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
        # guard: input channel count must match the configuration
        lowercase_ : List[str] = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
        lowercase_ : Any = self.embedder(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """Projection shortcut: 1x1 conv + batch norm used to downsample/re-channel
    the residual branch when input and output shapes differ.

    NOTE(review): ``nn.Convad``/``nn.BatchNormad`` are presumably garbled
    ``nn.Conv2d``/``nn.BatchNorm2d``; constructor bindings are scrambled.
    """

    def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
        super().__init__()
        lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
        lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )

    # 1x1 conv -> norm (no activation)
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
        lowercase_ : Tuple = self.convolution(lowercase_ )
        lowercase_ : str = self.normalization(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """Squeeze-and-Excitation layer: global-pool to 1x1, two 1x1 convs with
    ReLU/Sigmoid, then rescale the input channel-wise by the attention weights.
    """

    def __init__( self : str , lowercase_ : int , lowercase_ : int ):
        super().__init__()
        lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
        lowercase_ : int = nn.Sequential(
            nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
        # b c h w -> b c 1 1
        lowercase_ : List[str] = self.pooler(lowercase_ )
        lowercase_ : Optional[int] = self.attention(lowercase_ )
        # channel-wise rescaling of the input feature map
        lowercase_ : Any = hidden_state * attention
        return hidden_state
class __magic_name__ ( nn.Module):
    """RegNet X residual layer: 1x1 reduce -> 3x3 grouped conv -> 1x1 expand,
    with a projection shortcut when shape changes, then residual add + act.
    """

    def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
        super().__init__()
        # projection shortcut needed when channels change or stride > 1
        lowercase_ : List[Any] = in_channels != out_channels or stride != 1
        # group count derived from the configured group width (at least 1)
        lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
        lowercase_ : Dict = (
            RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
        )
        lowercase_ : List[Any] = nn.Sequential(
            RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
        lowercase_ : int = ACTaFN[config.hidden_act]

    def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
        # main branch, shortcut branch, residual add, final activation
        lowercase_ : Any = hidden_state
        lowercase_ : Union[str, Any] = self.layer(lowercase_ )
        lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
        hidden_state += residual
        lowercase_ : str = self.activation(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """RegNet Y residual layer: same as the X layer but with a
    Squeeze-and-Excitation block inserted before the final 1x1 conv.
    """

    def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
        super().__init__()
        # projection shortcut needed when channels change or stride > 1
        lowercase_ : str = in_channels != out_channels or stride != 1
        lowercase_ : int = max(1 , out_channels // config.groups_width )
        lowercase_ : int = (
            RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
        )
        # SE block squeezes to roughly in_channels / 4
        lowercase_ : Union[str, Any] = nn.Sequential(
            RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
        lowercase_ : Optional[int] = ACTaFN[config.hidden_act]

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
        # main branch, shortcut branch, residual add, final activation
        lowercase_ : Optional[int] = hidden_state
        lowercase_ : str = self.layer(lowercase_ )
        lowercase_ : int = self.shortcut(lowercase_ )
        hidden_state += residual
        lowercase_ : Optional[Any] = self.activation(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """One RegNet stage: `depth` stacked X or Y layers, where only the first
    layer downsamples (stride given by the `stride` argument).
    """

    def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
        super().__init__()
        # layer flavour selected by config.layer_type ("x" or "y")
        lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
        lowercase_ : str = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
        lowercase_ : Tuple = self.layers(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """RegNet encoder: a ModuleList of stages; optionally collects the hidden
    state before each stage (and the final one) when output_hidden_states=True.
    """

    def __init__( self : Dict , lowercase_ : RegNetConfig ):
        super().__init__()
        lowercase_ : Optional[Any] = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        # remaining stages pair consecutive hidden sizes with their depths
        lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
            self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )

    def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
        # accumulate intermediate hidden states only when requested
        lowercase_ : Tuple = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
            lowercase_ : Dict = stage_module(lowercase_ )
        if output_hidden_states:
            lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
        if not return_dict:
            # tuple output: drop any None entries
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
    """Base class handling weight initialization and the pretrained-model interface."""

    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Kaiming init for convolutions; unit weight / zero bias for norm layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="""fan_out""", nonlinearity="""relu""")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): RegNetModel is the base-model class defined later in this file.
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        # Global average pooling over the spatial dimensions.
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_lowercase)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="""vision""",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        """Embed the pixel values, run the encoder, and pool the final feature map."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_lowercase)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Classify the image; compute a loss when `labels` is given."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 30
| 1
|
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __magic_name__ ( unittest.TestCase):
    # Tokenizer class under test.
    UpperCamelCase__ = JukeboxTokenizer
    # Conditioning metadata fed to the tokenizer by the test methods via `self.metas`.
    # NOTE: the 8-space indentation inside the lyrics string is significant — the
    # golden token arrays below encode a newline followed by eight spaces (78/79
    # then eight 76/77 tokens) at each line break.
    metas = {
        '''artist''': '''Zac Brown Band''',
        '''genres''': '''Country''',
        '''lyrics''': '''I met a traveller from an antique land,
        Who said "Two vast and trunkless legs of stone
        Stand in the desert. . . . Near them, on the sand,
        Half sunk a shattered visage lies, whose frown,
        And wrinkled lip, and sneer of cold command,
        Tell that its sculptor well those passions read
        Which yet survive, stamped on these lifeless things,
        The hand that mocked them, and the heart that fed;
        And on the pedestal, these words appear:
        My name is Ozymandias, King of Kings;
        Look on my Works, ye Mighty, and despair!
        Nothing beside remains. Round the decay
        Of that colossal Wreck, boundless and bare
        The lone and level sands stretch far away
        ''',
    }
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
import torch
lowercase_ : str = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
lowercase_ : str = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
lowercase_ : Tuple = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Any ):
import torch
lowercase_ : Tuple = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
lowercase_ : str = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
lowercase_ : Dict = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule -> public names; consumed by `_LazyModule` at the bottom of the file.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase):
    """Configuration for BiT models (e.g. google/bit-50)."""

    model_type = '''bit'''
    layer_types = ['''preactivation''', '''bottleneck''']
    supported_padding = ['''SAME''', '''VALID''']

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],  # NOTE: mutable defaults kept for API parity
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'''Padding strategy {global_padding} not supported''')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # "stem" plus one name per stage; used to align backbone output features.
        self.stage_names = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 30
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("""input_in_memory_max_size""", ["""default""", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """A dataset is "small" iff both sizes are truthy and it fits under IN_MEMORY_MAX_SIZE."""
    if input_in_memory_max_size != "default":
        # Patch the config limit; pytest's monkeypatch restores it after the test.
        monkeypatch.setattr(datasets.config, """IN_MEMORY_MAX_SIZE""", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule -> public names; consumed by `_LazyModule` at the bottom of the file.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Regression-check the google/mt5-small per-token loss on a tiny example."""
        model = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""")
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""")
        input_ids = tokenizer("""Hello there""", return_tensors="""tf""").input_ids
        labels = tokenizer("""Hi I am""", return_tensors="""tf""").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        # Golden value recorded from a known-good run.
        EXPECTED_SCORE = -21.22_81_68
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
| 30
| 1
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
class __magic_name__ ( _UpperCAmelCase):
    """Names of the supported learning-rate schedules."""

    LINEAR = '''linear'''
    COSINE = '''cosine'''
    COSINE_WITH_RESTARTS = '''cosine_with_restarts'''
    POLYNOMIAL = '''polynomial'''
    CONSTANT = '''constant'''
    CONSTANT_WITH_WARMUP = '''constant_with_warmup'''
    PIECEWISE_CONSTANT = '''piecewise_constant'''
def lowerCamelCase ( optimizer : Optimizer , last_epoch : int = -1 ) -> Any:
    """Schedule with a constant learning rate (multiplier is always 1)."""
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def lowerCamelCase ( optimizer : Optimizer , num_warmup_steps : int , last_epoch : int = -1 ) -> List[Any]:
    """Constant learning rate after a linear warmup over ``num_warmup_steps`` steps."""

    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            # Ramp linearly from 0 to 1 over the warmup window.
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def lowerCamelCase ( optimizer : Optimizer , step_rules : str , last_epoch : int = -1 ) -> Any:
    """Piecewise-constant multiplier parsed from rules like ``"1:10,0.1:20,0.01"``.

    Each ``value:step`` pair applies ``value`` up to (exclusive) ``step``; the
    trailing bare number is used for all remaining steps.
    """
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str , steps_str = rule_str.split(""":""" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps : int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i , sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past every boundary: use the final multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def lowerCamelCase ( optimizer , num_warmup_steps , num_training_steps , last_epoch : int = -1 ):
    """Linear warmup to 1, then linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCamelCase ( optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : float = 0.5 , last_epoch : int = -1 ):
    """Linear warmup, then cosine decay over ``num_cycles`` half-waves."""

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        # Fraction of the post-warmup phase completed, in [0, 1].
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCamelCase ( optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : int = 1 , last_epoch : int = -1 ) -> str:
    """Linear warmup, then cosine decay with ``num_cycles`` hard restarts."""

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        # `% 1.0` restarts the cosine at the top of every cycle.
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCamelCase ( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    """Linear warmup, then polynomial decay from the optimizer's initial lr down to ``lr_end``."""
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )

    def lr_lambda(current_step : int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
# Dispatch table: scheduler type -> factory function, consumed by `get_scheduler` below.
# NOTE(review): as this file stands, neither the `SchedulerType.<MEMBER>` names nor the
# `get_*_schedule*` function names referenced here are defined anywhere in the module
# (the enum and factory definitions above use different identifiers), so evaluating
# this dict raises NameError — verify against the upstream `optimization.py`.
_lowercase : Tuple = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase ( name : Union[str, SchedulerType] , optimizer : Optimizer , step_rules : Optional[str] = None , num_warmup_steps : Optional[int] = None , num_training_steps : Optional[int] = None , num_cycles : int = 1 , power : float = 1.0 , last_epoch : int = -1 , ) -> str:
    """Build the named scheduler, validating which arguments each schedule requires."""
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
| 30
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( ode_func : Callable , y0 : float , x0 : float , step_size : float , x_end : float ) -> np.array:
    """Integrate y' = ode_func(x, y) from x0 to x_end using Heun's method (improved Euler).

    Returns the array of solution values y[0..n] with y[0] == y0.
    """
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # Predictor: one explicit Euler step.
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # Corrector: trapezoidal average of the slopes at both interval ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 30
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-level RNG shared by all calls that don't pass an explicit `rng`.
_lowercase : random.Random = random.Random()


def lowerCamelCase ( shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in [0, scale)."""
    if rng is None:
        rng = _lowercase
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class __magic_name__ ( unittest.TestCase):
    """Holds hyper-parameters and builds synthetic speech inputs for the feature-extractor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Length step between consecutive samples so the batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """Wav2Vec2 feature-extractor test-suite.

    NOTE(review): machine-mangled -- all methods share the name
    ``SCREAMING_SNAKE_CASE_`` (so later defs shadow earlier ones), and
    results are bound to the throwaway ``lowercase_`` while following lines
    read the original names (feat_extract, speech_inputs, processed, ...).
    ``np.floataa`` / ``torch.floataa`` are presumably mangled ``float32`` --
    confirm against the upstream test file.
    """

    # Feature-extractor class under test.
    UpperCamelCase__ = WavaVecaFeatureExtractor

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # setUp: build the shared tester fixture (should bind self.feat_extract_tester).
        lowercase_ : Tuple = WavaVecaFeatureExtractionTester(self )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
        # Assert per-feature ~zero mean and ~unit variance (tolerance 1e-3).
        self.assertTrue(np.all(np.mean(lowercase_ , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(lowercase_ , axis=0 ) - 1 ) < 1E-3 ) )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        lowercase_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        lowercase_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ : Dict = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
        # Test not batched input
        lowercase_ : str = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        lowercase_ : List[str] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
        # Test batched
        lowercase_ : List[str] = feat_extract(lowercase_ , return_tensors="""np""" ).input_values
        lowercase_ : Union[str, Any] = feat_extract(lowercase_ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
            self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        lowercase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        lowercase_ : str = np.asarray(lowercase_ )
        lowercase_ : Any = feat_extract(lowercase_ , return_tensors="""np""" ).input_values
        lowercase_ : Any = feat_extract(lowercase_ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
            self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Zero-mean/unit-variance normalization with explicit padding strategies;
        # the zero-padded tail must sum to ~0.
        lowercase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ : List[str] = ["""longest""", """max_length""", """do_not_pad"""]
        lowercase_ : Optional[int] = [None, 1600, None]
        for max_length, padding in zip(lowercase_ , lowercase_ ):
            lowercase_ : Union[str, Any] = feat_extract(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors="""np""" )
            lowercase_ : Optional[Any] = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Same normalization checks without forcing numpy return tensors.
        lowercase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ : List[str] = range(800 , 1400 , 200 )
        lowercase_ : Any = [floats_list((1, x) )[0] for x in lengths]
        lowercase_ : List[str] = ["""longest""", """max_length""", """do_not_pad"""]
        lowercase_ : Union[str, Any] = [None, 1600, None]
        for max_length, padding in zip(lowercase_ , lowercase_ ):
            lowercase_ : Optional[Any] = feat_extract(lowercase_ , max_length=lowercase_ , padding=lowercase_ )
            lowercase_ : Union[str, Any] = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Truncation to max_length combined with "max_length" padding.
        lowercase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ : Any = feat_extract(
            lowercase_ , truncation=lowercase_ , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
        lowercase_ : List[Any] = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # "longest" padding with truncation; output width follows min(max_length, longest).
        lowercase_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ : Tuple = feat_extract(
            lowercase_ , truncation=lowercase_ , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
        lowercase_ : Optional[Any] = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        lowercase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ : Optional[int] = feat_extract(
            lowercase_ , truncation=lowercase_ , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
        lowercase_ : Dict = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )

    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Padding must keep the float dtype for both numpy and torch tensors.
        import torch

        lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ : Any = np.random.rand(100 ).astype(np.floataa )
        lowercase_ : List[str] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowercase_ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            lowercase_ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            lowercase_ : Any = WavaVecaConfig.from_pretrained(lowercase_ )
            lowercase_ : Dict = WavaVecaFeatureExtractor.from_pretrained(lowercase_ )
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
| 30
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter (Audio-EQ-Cookbook formulas).

    NOTE(review): the original signature declared three parameters all named
    ``UpperCAmelCase__`` (a SyntaxError) and all coefficients were collapsed
    onto one throwaway name; names are reconstructed from the right-hand sides.

    Args:
        frequency: cutoff frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor; default 1/sqrt(2) is a Butterworth response.

    Returns:
        IIRFilter: configured 2nd-order filter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # Low-pass numerator is symmetric: [b0, b1, b0].
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter (Audio-EQ-Cookbook formulas).

    NOTE(review): parameter and coefficient names reconstructed; the mangled
    original had duplicate parameter names (SyntaxError).

    Args:
        frequency: cutoff frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor; default 1/sqrt(2) is a Butterworth response.

    Returns:
        IIRFilter: configured 2nd-order filter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # High-pass numerator is symmetric: [b0, b1, b0].
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter (Audio-EQ-Cookbook formulas).

    NOTE(review): parameter and coefficient names reconstructed; the mangled
    original had duplicate parameter names (SyntaxError).

    Args:
        frequency: center frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor controlling the bandwidth.

    Returns:
        IIRFilter: configured 2nd-order filter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter (Audio-EQ-Cookbook formulas).

    NOTE(review): parameter and coefficient names reconstructed; the mangled
    original had duplicate parameter names (SyntaxError).

    Args:
        frequency: frequency (Hz) at which the phase shift is 180 degrees.
        samplerate: sampling rate in Hz.
        q_factor: quality factor.

    Returns:
        IIRFilter: configured 2nd-order filter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: numerator is the mirrored denominator.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Create a peaking-EQ biquad filter (Audio-EQ-Cookbook formulas).

    NOTE(review): parameter and coefficient names reconstructed; the mangled
    original had duplicate parameter names (SyntaxError).

    Args:
        frequency: center frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: peak gain in decibels (negative values cut).
        q_factor: quality factor controlling the bandwidth.

    Returns:
        IIRFilter: configured 2nd-order filter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Create a low-shelf biquad filter (Audio-EQ-Cookbook formulas).

    NOTE(review): parameter and coefficient names reconstructed; the mangled
    original had duplicate parameter names (SyntaxError) and passed the wrong
    argument to ``sqrt`` (should be the amplitude ``big_a``).

    Args:
        frequency: shelf transition frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: shelf gain in decibels.
        q_factor: quality factor.

    Returns:
        IIRFilter: configured 2nd-order filter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain

    # Shared sub-expressions of the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Create a high-shelf biquad filter (Audio-EQ-Cookbook formulas).

    NOTE(review): parameter and coefficient names reconstructed; the mangled
    original had duplicate parameter names (SyntaxError) and passed the wrong
    argument to ``sqrt`` (should be the amplitude ``big_a``).

    Args:
        frequency: shelf transition frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: shelf gain in decibels.
        q_factor: quality factor.

    Returns:
        IIRFilter: configured 2nd-order filter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain

    # Shared sub-expressions of the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 30
| 1
|
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase(example):
    """Tokenize one dataset row and record its characters-per-token ratio.

    NOTE(review): reconstructed from mangled code -- the original bound every
    result to a throwaway local, so the ``output`` dict it returned was always
    empty, and ``len(output["input_ids"])`` raised KeyError.  The key
    ``ratio_char_token`` and ``truncation=False`` are presumed from the
    upstream codeparrot script -- confirm.

    Args:
        example: dataset row with a ``"content"`` text field.

    Returns:
        dict with ``input_ids`` and ``ratio_char_token``.
    """
    output = {}
    # ``tokenizer`` is the module-level AutoTokenizer built below.
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
# NOTE(review): mangled script -- each result below is bound to ``_lowercase``
# while later lines read the intended names (parser, args, tokenizer, t_start,
# ds).  Restore those bindings before running.
_lowercase : Union[str, Any] = HfArgumentParser(PretokenizationArguments)
_lowercase : Optional[int] = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    _lowercase : Any = multiprocessing.cpu_count()
_lowercase : Any = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load the raw dataset and time it.
_lowercase : Optional[int] = time.time()
_lowercase : str = load_dataset(args.dataset_name, split="train")
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")

# Tokenize in parallel, dropping all metadata columns.
_lowercase : Dict = time.time()
_lowercase : Optional[int] = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")

# Upload the tokenized dataset.
_lowercase : int = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 30
|
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
    """Builder config for datasets created from a Spark DataFrame."""

    # Optional features schema; None means the schema is not fixed up front.
    UpperCamelCase__ = None
def lowerCamelCase(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    """Build a generator function yielding ``(example_id, row_dict)`` pairs.

    Partitions of ``df`` are visited in ``partition_order``; example ids are
    ``"<partition_id>_<row_id>"``.

    NOTE(review): the original signature declared two parameters both named
    ``UpperCAmelCase__`` (a SyntaxError) and annotated the return as ``str``;
    names reconstructed from the body, and the function actually returns the
    inner generator function.
    """
    import pyspark

    def generate_fn():
        # Tag every row with its physical partition id so we can filter per partition.
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
    """Examples iterable backed by a Spark DataFrame, one shard per partition.

    NOTE(review): machine-mangled -- ``__init__`` assignments should target
    ``self.df`` / ``self.partition_order`` / ``self.generate_examples_fn``, the
    shard/shuffle methods both collapsed to ``SCREAMING_SNAKE_CASE_``, and the
    second one declares two parameters with the same name (SyntaxError).
    """

    def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
        # Default partition order is the DataFrame's natural partition range.
        lowercase_ : Dict = df
        lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self : List[Any] ):
        # Delegate to the generator built in __init__.
        yield from self.generate_examples_fn()

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
        # Shuffle: return a new iterable with a randomly permuted partition order.
        lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(lowercase_ )
        return SparkExamplesIterable(self.df , partition_order=lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
        # Shard: keep only the partitions assigned to this worker.
        lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
        return SparkExamplesIterable(self.df , partition_order=lowercase_ )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Number of shards equals the number of partitions in the configured order.
        return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
    """DatasetBuilder that materializes a ``pyspark.sql.DataFrame`` into
    sharded Arrow/Parquet files, writing on the Spark workers.

    NOTE(review): machine-mangled.  Method names all collapsed to
    ``SCREAMING_SNAKE_CASE_`` although the bodies call the original names
    (``_validate_cache_dir``, ``_repartition_df_if_needed``,
    ``_prepare_split_single``); several ``def`` headers declare duplicate
    ``lowercase_`` parameters (SyntaxError); and results are bound to the
    throwaway ``lowercase_`` while following lines read the intended names.
    ``uuid.uuida`` is presumably ``uuid.uuid4`` and ``shutil`` is used below
    without a visible import -- confirm against the upstream file.
    """

    UpperCamelCase__ = SparkConfig

    def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
        import pyspark

        # Should bind self._spark, self.df and self._working_dir; the config
        # name is derived from the DataFrame's semantic hash so the cache is
        # keyed by query content.
        lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
        lowercase_ : Optional[int] = df
        lowercase_ : List[str] = working_dir
        super().__init__(
            cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """Ensure the cache dir is reachable from every Spark worker (NFS check)."""
        # Returns the path of the created file.
        def create_cache_and_write_probe(lowercase_ : str ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=lowercase_ )
            lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(lowercase_ , """a""" )
            return [probe_file]

        # Local master: single machine, no shared-filesystem requirement.
        if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            # Run the probe on a worker, then check the driver can see the file.
            lowercase_ : str = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # DatasetInfo carrying the (optional) user-provided features schema.
        return datasets.DatasetInfo(features=self.config.features )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
        # Single TRAIN split; the DataFrame itself is the data source.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
        """Repartition so that each output shard stays under max_shard_size."""
        import pyspark

        def get_arrow_batch_size(lowercase_ : Any ):
            # Emit the Arrow byte-size of each record batch.
            for batch in it:
                yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )

        lowercase_ : Union[str, Any] = self.df.count()
        lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        lowercase_ : Any = (
            self.df.limit(lowercase_ )
            .repartition(1 )
            .mapInArrow(lowercase_ , """batch_bytes: long""" )
            .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
            lowercase_ : Any = self.df.repartition(lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
        """Write the DataFrame to sharded files on the workers; yield per-task stats."""
        import pyspark

        lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
        # Stage output in the working dir (if any) before the final rename.
        lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
        lowercase_ : Optional[Any] = file_format == """parquet"""
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        lowercase_ : Tuple = self.config.features
        lowercase_ : Any = self._writer_batch_size
        lowercase_ : List[str] = self._fs.storage_options

        def write_arrow(lowercase_ : str ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
            lowercase_ : Dict = next(lowercase_ , lowercase_ )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
            lowercase_ : int = 0
            lowercase_ : List[Any] = writer_class(
                features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
            lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
            writer.write_table(lowercase_ )
            for batch in it:
                # Roll over to a new shard when the size budget is exceeded.
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    lowercase_ , lowercase_ : Dict = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
                    shard_id += 1
                    lowercase_ : Any = writer_class(
                        features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
                lowercase_ : List[str] = pa.Table.from_batches([batch] )
                writer.write_table(lowercase_ )
            if writer._num_bytes > 0:
                # Flush the final (possibly partial) shard.
                lowercase_ , lowercase_ : str = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )

        if working_fpath != fpath:
            # Move staged files from the working dir to the final location.
            for file in os.listdir(os.path.dirname(lowercase_ ) ):
                lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
                shutil.move(lowercase_ , lowercase_ )
        # Aggregate per-task shard statistics back on the driver.
        lowercase_ : Union[str, Any] = (
            self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
            .groupBy("""task_id""" )
            .agg(
                pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
        """Drive the distributed write, then rename shards into the final layout."""
        self._validate_cache_dir()
        lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(lowercase_ )
        lowercase_ : Tuple = not is_remote_filesystem(self._fs )
        lowercase_ : int = os.path.join if is_local else posixpath.join
        # Filenames carry TTTTT (task id) and SSSSS (shard id) placeholders.
        lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
        lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
        lowercase_ : Any = 0
        lowercase_ : Tuple = 0
        lowercase_ : int = 0
        lowercase_ : Dict = []
        lowercase_ : Union[str, Any] = []
        for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
            # Unpack (num_examples, num_bytes, num_shards, shard_lengths).
            (
                (
                    lowercase_
                ) , (
                    lowercase_
                ) , (
                    lowercase_
                ) , (
                    lowercase_
                ) ,
            ) : Union[str, Any] = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(lowercase_ )
        lowercase_ : List[str] = total_num_examples
        lowercase_ : int = total_num_bytes
        # should rename everything at the end
        logger.debug(f'''Renaming {total_shards} shards.''' )
        if total_shards > 1:
            lowercase_ : Tuple = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            lowercase_ : Dict = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
                rename(
                    lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )

            # Build one rename job per (task_id, shard_id) pair and run them on Spark.
            lowercase_ : Union[str, Any] = []
            lowercase_ : Tuple = 0
            for i in range(len(lowercase_ ) ):
                lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
                for shard_id in range(lowercase_ ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
        else:
            # don't use any pattern
            lowercase_ : List[str] = 0
            lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
        # Streaming variant: iterate the DataFrame partition by partition.
        return SparkExamplesIterable(self.df )
| 30
| 1
|
'''simple docstring'''
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# NOTE(review): machine-mangled script -- every result is bound to ``_lowercase``
# while later lines read the intended names (parser, args, device, prompt,
# model_id, pipe, sample, timestep, encoder_hidden_status, input_example,
# seed, generator, generate_kwargs, image).  ``torch.bfloataa`` is presumably
# a mangled ``torch.bfloat16`` -- confirm against the upstream example.
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()

_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    # Optionally swap in the DPM-Solver multistep scheduler.
    _lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)

# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    _lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
# Dummy UNet inputs so ipex can trace with a sample_input; fall back without it.
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
    _lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    _lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    _lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)

# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
    _lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    _lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the Bloom model package.
#
# NOTE(review): reconstructed from mangled code.  The original bound every
# structure to the throwaway ``_lowercase``, so ``_import_structure`` (read by
# ``_LazyModule`` below) was never defined, the optional-dependency branches
# discarded their submodule lists, and the final ``_LazyModule`` was never
# installed in ``sys.modules``.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

# The fast tokenizer is only exposed when the `tokenizers` backend is present.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

# Modeling classes are only exposed when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def lowerCamelCase(num: int, den: int) -> bool:
    """Return True if ``num/den`` is a digit-cancelling fraction.

    A digit-cancelling fraction keeps its value when the shared digit is
    "cancelled", e.g. 49/98 == 4/8 (Project Euler 33).

    NOTE(review): the original signature declared both parameters as
    ``UpperCAmelCase__`` (a SyntaxError); names restored from the body, which
    already used ``num``/``den``.  Assumes ``den % 10 != 0`` when the first two
    conditions hold (caller filters that), otherwise the division would raise.
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def lowerCamelCase(digit_len: int) -> list[str]:
    """Return all two-digit digit-cancelling fractions as ``"num/den"`` strings.

    NOTE(review): reconstructed from mangled code -- the locals ``den`` and
    ``last_digit`` were bound to a throwaway name, and the loop-nesting was
    lost with the indentation.  Also note the body calls
    ``is_digit_cancelling``, which this mangled file only defines under the
    name ``lowerCamelCase`` -- restore the original name before use.

    Args:
        digit_len: number of digits of the numerator/denominator (2 for PE 33).

    Returns:
        list[str]: fractions like ``"49/98"``.
    """
    solutions = []
    den = 11  # smallest two-digit denominator with a non-zero trailing digit
    last_digit = int("1" + "0" * digit_len)  # exclusive upper bound for num
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def lowerCamelCase(max_digits: int = 2) -> int:
    """Project Euler 33: product of denominators of the digit-cancelling
    fractions, with the product expressed in lowest terms.

    NOTE(review): reconstructed from mangled code -- the accumulator and the
    per-iteration ``Fraction`` were bound to throwaway names.  The body calls
    ``fraction_list``, which this mangled file only defines as
    ``lowerCamelCase`` -- restore the original name before use.

    Returns:
        int: the denominator-product answer (100 for the 2-digit case).
    """
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        # Multiply by den/num so the final product is the reduced denominator.
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    # NOTE(review): the original guard called the undefined name ``solution``;
    # at module level the last ``def`` wins, so call it directly.
    print(lowerCamelCase())
| 30
|
'''simple docstring'''
# NOTE(review): referenced later as ``LETTERS`` (L + key alphabet); mangled name.
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def lowerCamelCase ( ) -> None:
    """Interactive Vigenere-cipher CLI: prompt for message, key and mode,
    then print the translated message.

    NOTE(review): mangled -- the three ``input`` results should bind
    message/key/mode, the branch results should bind ``translated``, and the
    final ``print`` argument was lost (``UpperCAmelCase__`` is undefined).
    """
    lowercase_ : List[Any] = input("""Enter message: """ )
    lowercase_ : str = input("""Enter key [alphanumeric]: """ )
    lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        lowercase_ : List[str] = """encrypt"""
        lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
    elif mode.lower().startswith("""d""" ):
        lowercase_ : Any = """decrypt"""
        lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
    print(F'''\n{mode.title()}ed message:''' )
    print(UpperCAmelCase__ )
def lowerCamelCase(key: str, message: str) -> str:
    """Encrypt ``message`` with the Vigenere cipher using ``key``.

    NOTE(review): the original declared two parameters with the same name
    (SyntaxError); the ``(key, message)`` order is presumed from the upstream
    cipher module -- confirm.  Delegates to ``translate_message``, which this
    mangled file only defines as ``lowerCamelCase``.
    """
    return translate_message(key, message, "encrypt")
def lowerCamelCase(key: str, message: str) -> str:
    """Decrypt ``message`` with the Vigenere cipher using ``key``.

    NOTE(review): the original declared two parameters with the same name
    (SyntaxError); the ``(key, message)`` order is presumed from the upstream
    cipher module -- confirm.  Delegates to ``translate_message``, which this
    mangled file only defines as ``lowerCamelCase``.
    """
    return translate_message(key, message, "decrypt")
def lowerCamelCase(key: str, message: str, mode: str) -> str:
    """Translate ``message`` through the Vigenere cipher.

    Letters are shifted by the corresponding key letter (added for "encrypt",
    subtracted for "decrypt"); case is preserved and non-letters pass through
    unchanged.

    NOTE(review): reconstructed from mangled code -- the original declared
    three parameters with the same name (SyntaxError) and bound every result
    to a throwaway local.  ``LETTERS`` is the module-level alphabet constant,
    which this mangled file assigned to ``_lowercase``.

    Args:
        key: cipher key (letters only are meaningful).
        message: text to translate.
        mode: "encrypt" or "decrypt".

    Returns:
        str: the translated message.
    """
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)  # wrap around the alphabet
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0  # restart the key
        else:
            # Non-letters are copied through unchanged.
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this mangled file -- the CLI
    # entry point above was renamed to ``lowerCamelCase``.
    main()
| 30
| 1
|
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
# Route all log output (DEBUG and up) to stdout so test runners capture it.
logging.basicConfig(level=logging.DEBUG)
# NOTE(review): mangled -- the two results below should bind ``logger`` and
# ``stream_handler``, which the last line reads.
_lowercase : str = logging.getLogger()
_lowercase : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __magic_name__ ( _UpperCAmelCase):
    # End-to-end smoke tests for the RAG fine-tuning script: each test writes
    # a tiny dummy dataset, launches `finetune_rag.py` in a subprocess, and
    # checks the exact-match metric it writes to metrics.json.
    # NOTE(review): helper calls reference `_create_dummy_data` /
    # `_run_finetune` while every method here is named SCREAMING_SNAKE_CASE_
    # — the method names were mangled; restore from the original test module.

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Any ):
        # Write a minimal seq2seq dataset: one fixed source/target line
        # repeated n_lines[split] times for each split and field.
        # NOTE(review): `contents` / `n_lines` are read below but the mangled
        # assignments bind `lowercase_` instead — NameError as written.
        os.makedirs(lowercase_ , exist_ok=lowercase_ )
        lowercase_ : str = {"""source""": """What is love ?""", """target""": """life"""}
        lowercase_ : List[str] = {"""train""": 12, """val""": 2, """test""": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                lowercase_ : Optional[Any] = """\n""".join([contents[field]] * n_lines[split] )
                with open(os.path.join(lowercase_ , f'''{split}.{field}''' ) , """w""" ) as f:
                    f.write(lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : int , lowercase_ : str = "pytorch" ):
        # Run the fine-tuning script end-to-end in a subprocess and return the
        # metrics dict it dumps to metrics.json.
        # NOTE(review): this signature repeats the parameter name `lowercase_`
        # (SyntaxError), and the body reads `data_dir`, `output_dir`, `gpus`,
        # `distributed_retriever`, `testargs`, `metrics_save_path`, `result`
        # that the mangled assignments never bind.
        lowercase_ : Optional[Any] = self.get_auto_remove_tmp_dir()
        lowercase_ : List[Any] = os.path.join(lowercase_ , """output""" )
        lowercase_ : Any = os.path.join(lowercase_ , """data""" )
        self._create_dummy_data(data_dir=lowercase_ )
        lowercase_ : Dict = f'''
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        '''.split()
        if gpus > 0:
            testargs.append(f'''--gpus={gpus}''' )
            if is_apex_available():
                testargs.append("""--fp16""" )
        else:
            # CPU fallback: two ddp_cpu processes instead of GPUs.
            testargs.append("""--gpus=0""" )
            testargs.append("""--distributed_backend=ddp_cpu""" )
            testargs.append("""--num_processes=2""" )
        lowercase_ : Any = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(lowercase_ , env=self.get_env() )
        lowercase_ : List[str] = os.path.join(lowercase_ , """metrics.json""" )
        with open(lowercase_ ) as f:
            lowercase_ : Optional[int] = json.load(lowercase_ )
        return result

    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        # Single-GPU run with the default (pytorch) retriever.
        lowercase_ : str = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_multi_gpu
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Multi-GPU run with the default retriever.
        lowercase_ : Union[str, Any] = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_gpu
    @require_ray
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Single-GPU run using the Ray distributed retriever.
        lowercase_ : Union[str, Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Multi-GPU Ray-retriever variant (note: still launched with gpus=1).
        lowercase_ : Dict = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 30
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
    # Unit tests for BarkProcessor: save/load round-trips, speaker-embedding
    # voice presets (dict / local .npz / hub name), and tokenizer parity.

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # setUp: shared checkpoint id, temp dir, and fixture strings.
        # NOTE(review): the mangled assignments bind `lowercase_`; later
        # methods read `self.checkpoint`, `self.tmpdirname`,
        # `self.voice_preset`, `self.input_string`,
        # `self.speaker_embeddings_dict_path`,
        # `self.speaker_embeddings_directory`, which are never set here.
        lowercase_ : List[Any] = """ylacombe/bark-small"""
        lowercase_ : List[str] = tempfile.mkdtemp()
        lowercase_ : Tuple = """en_speaker_1"""
        lowercase_ : Union[str, Any] = """This is a test string"""
        lowercase_ : int = """speaker_embeddings_path.json"""
        lowercase_ : Any = """speaker_embeddings"""

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
        # Helper: load the tokenizer for the test checkpoint.
        return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # tearDown: remove the temporary directory created in setUp.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # Round-trip: saving and reloading the processor must preserve vocab.
        # NOTE(review): `processor` / `tokenizer` are read but the mangled
        # assignments bind `lowercase_` — NameError as written.
        lowercase_ : Any = self.get_tokenizer()
        lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Save/load round-trip with speaker embeddings and extra tokenizer
        # kwargs (custom BOS/EOS tokens).
        lowercase_ : Any = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Voice presets: pass an already-loaded dict, a local .npz file, and a
        # hub preset name; the history prompt must match in each case.
        lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        lowercase_ : Optional[int] = 35
        lowercase_ : int = 2
        lowercase_ : Union[str, Any] = 8
        lowercase_ : Union[str, Any] = {
            """semantic_prompt""": np.ones(lowercase_ ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : Dict = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(lowercase_ , **lowercase_ )
        lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : List[Any] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Parity: processor output must equal direct tokenizer output.
        lowercase_ : List[str] = self.get_tokenizer()
        lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
        lowercase_ : Any = processor(text=self.input_string )
        lowercase_ : List[str] = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 1
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Any = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_lowercase : Union[str, Any] = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> Union[str, Any]:
    """Load a raw checkpoint state dict from the given path onto CPU.

    Args:
        UpperCAmelCase__: filesystem path of the ``.th`` checkpoint file.

    Returns:
        The object ``torch.load`` deserialises (the checkpoint state dict).

    Fix: the original stored the loaded object in a throwaway variable and
    then returned the undefined name ``sd`` (NameError); return the loaded
    value directly instead.
    """
    return torch.load(UpperCAmelCase__ , map_location="""cpu""" )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str]=rename_keys_prefix ) -> List[str]:
    # Rewrite the keys of a VisualBERT research checkpoint using the
    # (old, new) prefix pairs in `rename_keys_prefix`, skipping detector
    # weights, and mirror the decoder bias for old-style BERT checkpoints.
    # NOTE(review): the signature declares the same parameter name three
    # times (SyntaxError), and the body reads `d`, `config`, `key`,
    # `new_key`, `new_d` that the mangled assignments never bind — restore
    # from the original conversion script before running.
    lowercase_ : List[str] = OrderedDict()          # new_d: the renamed state dict
    # Presumably the position-ids buffer for the converted model — TODO confirm
    # which key this was assigned to in the original script.
    lowercase_ : List[str] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        lowercase_ : Dict = key                     # new_key: starts as the original key
        for name_pair in rename_keys_prefix:
            lowercase_ : Any = new_key.replace(name_pair[0] , name_pair[1] )
        lowercase_ : Dict = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            lowercase_ : str = new_d["""cls.predictions.bias"""]
    return new_d
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
    # Convert a VisualBERT research checkpoint into a HF Transformers model:
    # infer config params and task head from the checkpoint filename, rename
    # the state-dict keys, load them into the matching model class, and save.
    # NOTE(review): the body reads `checkpoint_path`, `config_params`,
    # `model_type`, `config`, `state_dict`, `new_state_dict`, `model`, but
    # the mangled assignments bind `lowercase_` — NameError as written;
    # restore from the original conversion script.
    assert (
        checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        # Pre-training checkpoints always map to the pretraining head.
        lowercase_ : List[str] = """pretraining"""
        if "vcr" in checkpoint_path:
            lowercase_ : List[Any] = {"""visual_embedding_dim""": 512}
        elif "vqa_advanced" in checkpoint_path:
            lowercase_ : Any = {"""visual_embedding_dim""": 2048}
        elif "vqa" in checkpoint_path:
            lowercase_ : str = {"""visual_embedding_dim""": 2048}
        elif "nlvr" in checkpoint_path:
            lowercase_ : List[Any] = {"""visual_embedding_dim""": 1024}
        else:
            raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
    else:
        # Fine-tuned checkpoints map to the task-specific head.
        if "vcr" in checkpoint_path:
            lowercase_ : Any = {"""visual_embedding_dim""": 512}
            lowercase_ : Dict = """multichoice"""
        elif "vqa_advanced" in checkpoint_path:
            lowercase_ : Optional[int] = {"""visual_embedding_dim""": 2048}
            lowercase_ : List[Any] = """vqa_advanced"""
        elif "vqa" in checkpoint_path:
            lowercase_ : int = {"""visual_embedding_dim""": 2048, """num_labels""": 3129}
            lowercase_ : Tuple = """vqa"""
        elif "nlvr" in checkpoint_path:
            lowercase_ : Any = {
                """visual_embedding_dim""": 1024,
                """num_labels""": 2,
            }
            lowercase_ : int = """nlvr"""
    lowercase_ : List[Any] = VisualBertConfig(**UpperCAmelCase__ )
    # Load State Dict
    lowercase_ : str = load_state_dict(UpperCAmelCase__ )
    lowercase_ : str = get_new_dict(UpperCAmelCase__ , UpperCAmelCase__ )
    if model_type == "pretraining":
        lowercase_ : Optional[Any] = VisualBertForPreTraining(UpperCAmelCase__ )
    elif model_type == "vqa":
        lowercase_ : Optional[int] = VisualBertForQuestionAnswering(UpperCAmelCase__ )
    elif model_type == "nlvr":
        lowercase_ : Optional[Any] = VisualBertForVisualReasoning(UpperCAmelCase__ )
    elif model_type == "multichoice":
        lowercase_ : Tuple = VisualBertForMultipleChoice(UpperCAmelCase__ )
    model.load_state_dict(UpperCAmelCase__ )
    # Save Checkpoints
    Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
    model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
    # CLI entry point: convert <orig_checkpoint_path> into a HF model saved
    # at <pytorch_dump_folder_path>.
    # NOTE(review): the mangled assignments bind `_lowercase` while the next
    # statements read `parser` / `args`, and `convert_visual_bert_checkpoint`
    # is not defined under that name above — NameError as written.
    _lowercase : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    _lowercase : str = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 30
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
    # Task template for image-classification datasets: an `image` input
    # column and a ClassLabel `labels` column.
    UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
    UpperCamelCase__ = Features({'''image''': Image()})
    UpperCamelCase__ = Features({'''labels''': ClassLabel})
    UpperCamelCase__ = "image"
    UpperCamelCase__ = "labels"

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
        # Validate that the dataset's features carry the label column as a
        # ClassLabel and return a copy of this template whose label schema
        # uses the dataset's actual ClassLabel feature.
        # NOTE(review): the parameter and assignment names were mangled — the
        # body reads `features`, `label_schema`, `task_template` which are
        # never bound here; restore from the original module.
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , lowercase_ ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        lowercase_ : List[str] = copy.deepcopy(self )
        lowercase_ : List[str] = self.label_schema.copy()
        lowercase_ : List[Any] = features[self.label_column]
        lowercase_ : Optional[Any] = label_schema
        return task_template

    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Map dataset column names to the task's canonical column names.
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 30
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # Integration test: classify one RVL-CDIP demo document image with the
        # fine-tuned DiT model and compare the logits shape plus the first
        # three logits against reference values.
        # NOTE(review): the mangled assignments bind `lowercase_` while later
        # statements read `model`, `image_processor`, `dataset`, `inputs`,
        # `outputs`, `logits`, `expected_shape`, `expected_slice` —
        # NameError as written; restore from the original test module.
        lowercase_ : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
        lowercase_ : List[str] = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
        model.to(lowercase_ )
        from datasets import load_dataset
        lowercase_ : Any = load_dataset("""nielsr/rvlcdip-demo""" )
        lowercase_ : Optional[Any] = dataset["""train"""][0]["""image"""].convert("""RGB""" )
        lowercase_ : str = image_processor(lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
        # forward pass
        with torch.no_grad():
            lowercase_ : Optional[Any] = model(**lowercase_ )
        lowercase_ : Dict = outputs.logits
        lowercase_ : Optional[int] = torch.Size((1, 16) )
        self.assertEqual(logits.shape , lowercase_ )
        lowercase_ : List[str] = torch.tensor(
            [-0.41_58, -0.40_92, -0.43_47] , device=lowercase_ , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 30
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
    # Download num_class_images regularization images matching class_prompt
    # from the LAION-400M kNN service into class_data_dir, recording
    # captions, URLs, and file paths in sidecar text files.
    # NOTE(review): heavily mangled — the signature repeats a parameter name,
    # the three `with open(...) as fa` clauses shadow one file handle, and the
    # body reads `factor`, `num_class_images`, `class_data_dir`, `client`,
    # `num_images`, `class_images`, `count`, `total`, `pbar`, `images`, `img`
    # that are never bound. Restore from the original script before running.
    lowercase_ : str = 1.5
    # Query with head-room (factor x) so enough results survive filtering.
    lowercase_ : List[Any] = int(factor * num_class_images )
    lowercase_ : int = ClipClient(
        url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
    os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ )
    # Skip entirely if enough images were already downloaded.
    if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
        return
    # Grow the query size until the service returns enough candidates
    # (capped at 10k results).
    while True:
        lowercase_ : List[str] = client.query(text=UpperCAmelCase__ )
        if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            lowercase_ : List[str] = int(factor * num_images )
            lowercase_ : List[str] = ClipClient(
                url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
    lowercase_ : List[str] = 0
    lowercase_ : Dict = 0
    lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ )
    with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
        F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
        while total < num_class_images:
            lowercase_ : str = class_images[count]
            count += 1
            try:
                lowercase_ : Union[str, Any] = requests.get(images["""url"""] )
                if img.status_code == 200:
                    lowercase_ : List[str] = Image.open(BytesIO(img.content ) )
                    with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
                        f.write(img.content )
                    fa.write(images["""caption"""] + """\n""" )
                    fa.write(images["""url"""] + """\n""" )
                    fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            # Best-effort download loop: any failure just skips the candidate.
            except Exception:
                continue
    return
def lowerCamelCase ( ) -> argparse.Namespace:
    """Parse the class-image retrieval CLI arguments.

    Fixes the mangled original, which created the parser under a throwaway
    name and then called ``parser.add_argument`` on an undefined name, and
    restores the literal option values (``required=True`` / ``type=str`` /
    ``type=int``) that the undefined ``UpperCAmelCase__`` placeholders stood
    for.

    Returns:
        Parsed namespace with ``class_prompt``, ``class_data_dir`` and
        ``num_class_images`` (default 200).
    """
    parser = argparse.ArgumentParser("""""" , add_help=False )
    parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=True , type=str )
    parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=True , type=str )
    parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
_lowercase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
| 1
|
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int = 1000 ) -> int:
    """Accumulate 2 * a * ((a - 1) // 2) for every a from 3 up to the limit, inclusive."""
    total = 0
    for term in range(3 , UpperCAmelCase__ + 1 ):
        total += 2 * term * ((term - 1) // 2)
    return total
if __name__ == "__main__":
print(solution())
| 30
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( sequence : list , start : int | None = None , end : int | None = None ) -> None:
    """Sort ``sequence`` in place between ``start`` and ``end`` (inclusive) with slowsort.

    Slowsort recursively sorts each half, moves the maximum of the range to
    the last position with one compare-and-swap, then recurses on everything
    except that settled element.  It is deliberately pessimal
    ("multiply and surrender") — use only for demonstration.

    Args:
        sequence: list to sort in place.
        start: first index of the range; defaults to 0.
        end: last index of the range; defaults to ``len(sequence) - 1``.

    Fixes the mangled original, whose three parameters shared one name
    (SyntaxError) and whose assignments bound a throwaway name while the
    logic read ``start`` / ``end`` / ``mid``.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # Sort both halves, bubble the maximum of the range into the last slot,
    # then recurse on everything except that settled element.
    lowerCamelCase(sequence , start , mid )
    lowerCamelCase(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[mid], sequence[end] = sequence[end], sequence[mid]
    lowerCamelCase(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Any = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
_lowercase : Dict = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Script body: build a Stable Diffusion pipeline on CPU, optimise its
# sub-modules with Intel Extension for PyTorch (bfloat16 + channels-last),
# then generate one image from the prompt and save it.
# NOTE(review): the mangled assignments throughout this block bind
# `_lowercase` while later statements read `parser`, `args`, `pipe`,
# `device`, `model_id`, `prompt`, `sample`, `timestep`,
# `encoder_hidden_status`, `input_example`, `seed`, `generator`,
# `generate_kwargs`, `image` — NameError as written; restore from the
# original script.  `torch.bfloataa` also looks like a mangling of
# `torch.bfloat16` — confirm.
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    # Optionally swap in the DPM-Solver multistep scheduler.
    _lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    _lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
    # Prefer optimisation with a traced sample input; fall back without it.
    _lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    _lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    _lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
    _lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    _lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 1
|
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : float ) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length.

    Formula: 3 * sqrt(25 + 10 * sqrt(5)) * edge**2.

    Raises:
        ValueError: if the edge length is not a positive number.

    Fixes the mangled original, whose type check called
    ``isinstance(x, x)`` (TypeError) and which compared the value before
    checking its type, so non-numeric input raised TypeError rather than
    ValueError.
    """
    # Validate the type first so that non-numeric input raises ValueError,
    # not TypeError from the `<=` comparison.
    if not isinstance(UpperCAmelCase__ , (int, float) ) or UpperCAmelCase__ <= 0:
        raise ValueError("""Length must be a positive.""" )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (UpperCAmelCase__**2)
def lowerCamelCase ( UpperCAmelCase__ : float ) -> float:
    """Return the volume of a regular dodecahedron with the given edge length.

    Formula: ((15 + 7 * sqrt(5)) / 4) * edge**3.

    Raises:
        ValueError: if the edge length is not a positive number.

    Fixes the mangled original, whose type check called
    ``isinstance(x, x)`` (TypeError) and which compared the value before
    checking its type, so non-numeric input raised TypeError rather than
    ValueError.
    """
    # Validate the type first so that non-numeric input raises ValueError,
    # not TypeError from the `<=` comparison.
    if not isinstance(UpperCAmelCase__ , (int, float) ) or UpperCAmelCase__ <= 0:
        raise ValueError("""Length must be a positive.""" )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (UpperCAmelCase__**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
_lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( mat_a : np.ndarray , mat_b : np.ndarray , mat_c : np.ndarray , pseudo_inv : np.ndarray | None = None , ) -> np.ndarray:
    """Return the Schur complement C - B^T A^{-1} B of the block matrix [[A, B], [B^T, C]].

    Args:
        mat_a: square block A.
        mat_b: off-diagonal block B, with the same number of rows as A.
        mat_c: block C, with the same number of columns as B.
        pseudo_inv: optional precomputed (pseudo-)inverse of A; when omitted,
            ``np.linalg.inv(mat_a)`` is used.

    Raises:
        ValueError: on incompatible block shapes, or when A is singular and
            no ``pseudo_inv`` was supplied.

    Fixes the mangled original, whose four parameters shared a single name
    (SyntaxError) and whose assignments bound a throwaway name while the
    logic read ``shape_a`` / ``shape_b`` / ``shape_c`` / ``a_inv`` /
    ``mat_b`` / ``mat_c``.
    """
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" )
    return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase):
    # Tests for the Schur-complement helper above: the block-determinant
    # identity det([[A, B], [B^T, C]]) == det(A) * det(S), plus the two
    # shape-mismatch error paths.
    # NOTE(review): `schur_complement` is not defined under that name in this
    # file (the helper above is named `lowerCamelCase`), and the mangled
    # assignments bind `lowercase_` while the assertions read `a`, `b`, `c`,
    # `det_a`, `det_s` — restore from the original module before running.

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Happy path: the determinant identity must hold.
        lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
        lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
        lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
        lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
        lowercase_ : int = np.linalg.det(lowercase_ )
        lowercase_ : int = np.linalg.det(lowercase_ )
        self.assertAlmostEqual(lowercase_ , det_a * det_s )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # A and B with mismatched row counts must raise.
        lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(lowercase_ ):
            schur_complement(lowercase_ , lowercase_ , lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # B and C with mismatched column counts must raise.
        lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
        lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(lowercase_ ):
            schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = GPTaTokenizer
UpperCamelCase__ = GPTaTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = {'''add_prefix_space''': True}
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ : Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
lowercase_ : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase_ : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase_ : Tuple = {"""unk_token""": """<unk>"""}
lowercase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , **lowercase_ : int ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : List[Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Any ):
lowercase_ : Dict = """lower newer"""
lowercase_ : int = """lower newer"""
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : List[str] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase_ : Union[str, Any] = """lower newer"""
lowercase_ : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowercase_ : Optional[int] = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
lowercase_ : Optional[Any] = tokens + [tokenizer.unk_token]
lowercase_ : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowercase_ : Tuple = self.get_tokenizer()
lowercase_ : List[str] = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
lowercase_ : List[str] = """lower newer"""
# Testing tokenization
lowercase_ : Dict = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_ )
lowercase_ : int = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids without special tokens
lowercase_ : Tuple = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase_ : Optional[Any] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids with special tokens
lowercase_ : Dict = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
lowercase_ : List[str] = tokenizer.encode(lowercase_ , add_prefix_space=lowercase_ )
lowercase_ : Dict = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing the unknown token
lowercase_ : str = tokens + [rust_tokenizer.unk_token]
lowercase_ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , *lowercase_ : str , **lowercase_ : Optional[int] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def test_padding(self, max_length=15):
    """Padding with no pad token must raise ValueError on the fast tokenizer.

    Fix: the expected exception class and the tokenizer local were bound to /
    replaced by placeholder names (`lowercase_`), so `tokenizer_r` was undefined.
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # Simple input
            s = "This is a simple input"
            s2 = ["This is a simple input 1", "This is a simple input 2"]
            p = ("This is a simple input", "This is a pair")
            p2 = [
                ("This is a simple input 1", "This is a simple input 2"),
                ("This is a simple pair 1", "This is a simple pair 2"),
            ]
            # Simple input tests
            self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
            # Simple input
            self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
            # Simple input
            self.assertRaises(
                ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
            )
            # Pair input
            self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
            # Pair input
            self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
            # Pair input
            self.assertRaises(
                ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
            )
def test_padding_if_pad_token_set_slow(self):
    """With an explicit pad token, padding must work for single and pair inputs.

    Fix: `tokenizer`, `pad_token_id` and the four outputs (`out_s`, `out_sa`,
    `out_p`, `out_pa`) were assigned to placeholder names, making every
    assertion below a NameError.
    """
    tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
    # Simple input
    s = "This is a simple input"
    sa = ["This is a simple input looooooooong", "This is a simple input"]
    p = ("This is a simple input", "This is a pair")
    pa = [
        ("This is a simple input loooooong", "This is a simple input"),
        ("This is a simple pair loooooong", "This is a simple pair"),
    ]
    pad_token_id = tokenizer.pad_token_id
    out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
    out_sa = tokenizer(sa, padding=True, truncate=True, return_tensors="np")
    out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
    out_pa = tokenizer(pa, padding=True, truncate=True, return_tensors="np")
    # s
    # test single string max_length padding
    self.assertEqual(out_s["input_ids"].shape[-1], 30)
    self.assertTrue(pad_token_id in out_s["input_ids"])
    self.assertTrue(0 in out_s["attention_mask"])
    # s2
    # test automatic padding
    self.assertEqual(out_sa["input_ids"].shape[-1], 33)
    # long slice doesn't have padding
    self.assertFalse(pad_token_id in out_sa["input_ids"][0])
    self.assertFalse(0 in out_sa["attention_mask"][0])
    # short slice does have padding
    self.assertTrue(pad_token_id in out_sa["input_ids"][1])
    self.assertTrue(0 in out_sa["attention_mask"][1])
    # p
    # test single pair max_length padding
    self.assertEqual(out_p["input_ids"].shape[-1], 60)
    self.assertTrue(pad_token_id in out_p["input_ids"])
    self.assertTrue(0 in out_p["attention_mask"])
    # p2
    # test automatic padding pair
    self.assertEqual(out_pa["input_ids"].shape[-1], 52)
    # long slice pair doesn't have padding
    self.assertFalse(pad_token_id in out_pa["input_ids"][0])
    self.assertFalse(0 in out_pa["attention_mask"][0])
    # short slice pair does have padding
    self.assertTrue(pad_token_id in out_pa["input_ids"][1])
    self.assertTrue(0 in out_pa["attention_mask"][1])
def test_add_bos_token_slow(self):
    """A tokenizer created with add_bos_token must prepend the custom BOS token.

    Fix: `bos_token`, `tokenizer`, `out_s`/`out_sa` and `decode_s`/`decode_sa`
    were bound to placeholder names; every later use raised NameError.
    """
    bos_token = "$$$"
    tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
    s = "This is a simple input"
    sa = ["This is a simple input 1", "This is a simple input 2"]
    bos_token_id = tokenizer.bos_token_id
    out_s = tokenizer(s)
    out_sa = tokenizer(sa)
    self.assertEqual(out_s.input_ids[0], bos_token_id)
    self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
    decode_s = tokenizer.decode(out_s.input_ids)
    decode_sa = tokenizer.batch_decode(out_sa.input_ids)
    self.assertEqual(decode_s.split()[0], bos_token)
    self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def test_special_tokens_mask_input_pairs_and_bos_token(self):
    """`special_tokens_mask` must flag exactly the tokens added by encode_plus.

    Fix: every intermediate (`tokenizers`, `encoded_sequence`,
    `encoded_sequence_w_special`, `special_tokens_mask`, `filtered_sequence`)
    was bound to a placeholder name and then read under its real name.
    """
    # TODO: change to self.get_tokenizers() when the fast version is implemented
    tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}'''):
            sequence_0 = "Encode this."
            sequence_1 = "This one too please."
            encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
            encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
            encoded_sequence_dict = tokenizer.encode_plus(
                sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
            )
            encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
            special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
            self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
            # Keep only the non-special positions; they must reproduce the plain encoding.
            filtered_sequence = [
                (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
            ]
            filtered_sequence = [x for x in filtered_sequence if x is not None]
            self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class __magic_name__(unittest.TestCase):
    """Regression tests for the OPT (facebook/opt-350m) tokenizer.

    Fix: all three methods shared one obfuscated name (so only the last
    survived and none were discovered by unittest), and locals were bound to
    placeholder names while read under their real ones.
    """

    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_load_slow_tokenizer(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
| 30
|
'''simple docstring'''
_lowercase : str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Canonical name used by the encode/decode helpers below.
B64_CHARSET = _lowercase


def lowerCamelCase(data: bytes) -> bytes:
    """Encode *data* (a bytes-like object) to Base64, returning bytes.

    Fixes: the type check compared the value against itself
    (`isinstance(x, x)`), the constructed error message was discarded in favor
    of raising `TypeError(data)`, and `data`/`binary_stream`/`padding` were
    read but never bound under those names.

    Raises:
        TypeError: if *data* is not a bytes-like object.
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def lowerCamelCase(encoded_data: str) -> bytes:
    """Decode a Base64 string (or ASCII bytes) back to the original bytes.

    Fixes: the type checks compared the value against itself, the constructed
    error message was discarded in favor of raising `TypeError(encoded_data)`,
    and `encoded_data`/`padding`/`binary_stream`/`decoded_data` were read but
    never bound under those names.

    Raises:
        TypeError: if *encoded_data* is neither str nor a bytes-like object.
        ValueError: if bytes input is not pure ASCII.
        AssertionError: on invalid Base64 characters or incorrect padding.
    """
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the Bloom model. Fixes: `_import_structure` was
# referenced by `_LazyModule` but never defined, the optional-dependency export
# lists were bound to a throwaway name instead of dict entries, and the lazy
# module instance was discarded instead of being installed in `sys.modules`.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
import argparse
# Path of the docs JS file that carries the version switcher.
_lowercase : str = "docs/source/_static/js/custom.js"


def lowerCamelCase(version) -> None:
    """Insert *version* into the docs' custom.js: update `stableVersion` and
    append an entry to `versionMapping`.

    Fixes: the function opened its *version* argument as a file instead of the
    path constant, `lines`/`index` were never bound under the names later read,
    and the rewritten `stableVersion` line was discarded instead of stored back
    into `lines[index]`.
    """
    with open(_lowercase, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'''    "v{version}": "v{version}",\n'''
    with open(_lowercase, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    # Fixes: the parser/args were bound to a throwaway name while `parser`/`args`
    # were read, and the call used the undefined name `update_custom_js` instead
    # of this module's `lowerCamelCase`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    lowerCamelCase(args.version)
| 30
| 1
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """Builder config for datasets created from a Spark DataFrame.

    Fixes: the class was obfuscated to a placeholder name while the builder
    below references `SparkConfig`, and the single field lost its name although
    the builder reads `self.config.features`.
    """

    # Optional schema of the resulting dataset; read as `self.config.features`.
    features: Optional[datasets.Features] = None


# Backward-compatible alias for the previous (placeholder) class name.
__magic_name__ = SparkConfig
def lowerCamelCase(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    """Return a zero-argument generator function yielding (key, example) pairs
    for *df*, visiting physical partitions in *partition_order*.

    Fix: both parameters originally shared one placeholder name (a
    SyntaxError), while the body reads `df` and `partition_order`.
    """
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Pull one physical partition at a time, in the requested order.
            partition_df = df_with_partition_id.select("*").where(f'''part_id = {partition_id}''').drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn


# Alias matching the name used by SparkExamplesIterable.
_generate_iterable_examples = lowerCamelCase
class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable over the partitions of a Spark DataFrame.

    Fixes: duplicate placeholder parameter names in `__init__` (SyntaxError),
    self-references to the undefined name `SparkExamplesIterable`, and all
    methods sharing one obfuscated name (shadowing each other) although the
    `_BaseExamplesIterable` API dispatches on `shuffle_data_sources` /
    `shard_data_sources` / `n_shards`.
    """

    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        # `lowerCamelCase` builds the per-partition example generator.
        self.generate_examples_fn = lowerCamelCase(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator):
        """Return a copy that visits partitions in a shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int):
        """Return a copy restricted to the partitions owned by *worker_id*."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        return len(self.partition_order)


# Backward-compatible alias for the previous (placeholder) class name.
__magic_name__ = SparkExamplesIterable
class __magic_name__(datasets.DatasetBuilder):
    """Dataset builder that materializes a Spark DataFrame into Arrow/Parquet shards.

    Fixes applied throughout: duplicate placeholder parameter names
    (SyntaxErrors), `uuid.uuida` -> `uuid.uuid4`, undefined locals
    (`SUFFIX`, `fpath`, `stats`, inner helper names), attribute assignments
    (`self._spark`, `self.df`, ...) that were bound to throwaway names, and
    all methods sharing one obfuscated name while `_prepare_split` and the
    `datasets.DatasetBuilder` API call them by their real names.
    """

    # Builder-config class (attribute name kept from the file's convention).
    UpperCamelCase__ = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f'''{shard_id:05d}''').replace("TTTTT", f'''{task_id:05d}'''),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f'''{shard_id:05d}''').replace("TTTTT", f'''{task_id:05d}'''),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size=None,
        num_proc=None,
        **kwargs,
    ):
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir, fname)
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f'''Renaming {total_shards} shards.''')
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f'''{shard_id:05d}''').replace("TTTTT", f'''{task_id:05d}'''),
                    fpath.replace("TTTTT-SSSSS", f'''{global_shard_id:05d}''').replace("NNNNN", f'''{total_shards:05d}'''),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f'''{shard_id:05d}''').replace("TTTTT", f'''{task_id:05d}'''),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator"):
        return SparkExamplesIterable(self.df)
| 30
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__:
    """Helper that builds Bit configs/inputs and runs shape checks for the tests.

    Fixes: all `__init__` parameters shared one placeholder name (SyntaxError),
    the attributes read by the test class (`self.batch_size`, ...) were never
    assigned, and the methods shared one obfuscated name although the test
    class calls them as `prepare_config_and_inputs`, `create_and_check_model`,
    etc.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Readable alias; the test class below instantiates `BitModelTester(self)`.
BitModelTester = __magic_name__
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
    # Inherited feed-forward chunking test is not applicable to Bit.
    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def prepare_img():
    """Load the COCO sample image used by the integration test below.

    Fixes: the function returned the undefined name ``image``, and the def was
    renamed away from ``prepare_img`` — the name its caller still uses.
    (The broken ``-> Optional[Any]`` annotation is dropped: ``Optional`` is not
    imported in this file fragment.)
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Slow integration test: Bit image-classification logits on a real image.

    Fixes: the cached property is restored to ``default_image_processor`` (the
    name the test method reads), and the locals destroyed by the mangling
    (model/image/inputs/outputs/expected_*) are rebound.
    # NOTE(review): `torch_device` is assumed imported at the top of this test
    # file (outside this view) — confirm.
    """

    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape)

        expected_slice = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """Backbone test suite for Bit.

    Fixes: the three class attributes were all mangled to ``UpperCamelCase__``
    (each overwriting the previous) and setUp discarded the tester instance.
    Attribute names reconstructed to the ones the backbone tester mixin reads.
    # NOTE(review): base class `_UpperCAmelCase` is itself a mangled name
    # (presumably BackboneTesterMixin) — restore upstream.
    """

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffold (standard transformers pattern).
# Fixes: the structure dict and the optional modeling list were both assigned
# to `_lowercase` (the second shadowing the first), `_LazyModule` was called
# with the undefined name `_import_structure`, and its result was discarded
# instead of being installed into `sys.modules`.
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Candidate summaries used as predictions by the ROUGE tests below.
# Fix: both fixture lists were mangled to `_lowercase` (the second shadowed the
# first); name reconstructed — confirm against the original test module.
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]
# Reference summaries paired with PRED above.
# Fix: renamed from the shadowing `_lowercase` binding — confirm against the
# original test module.
TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic():
    """Disaggregated ROUGE-2 must not depend on which other keys are requested.

    Fixes: all six test functions shared the name ``lowerCamelCase`` (shadowing
    each other) and the locals ``no_aggregation``/``no_aggregation_just_ra``
    were referenced but never bound. (Broken ``-> List[str]`` annotation
    dropped: ``List`` is not imported here.)
    # NOTE(review): PRED/TGT are the module-level fixture lists whose names
    # were lost in the mangling — confirm upstream.
    """
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["""rouge2""", """rougeL"""] )
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["""rouge2"""] )
    assert (
        pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """rougeLsum with newline separation must beat the non-separated score.

    Fixes: restores the ``score``/``score_no_sep`` bindings and the
    True/False ``newline_sep`` arguments lost in the mangling; distinct
    pytest-style name restores test collection.
    # NOTE(review): PRED/TGT fixture names reconstructed — confirm upstream.
    """
    k = """rougeLsum"""
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    """rouge1/2/L must be identical with and without newline separation.

    Fixes: restores the ``score_sep``/``score_no_sep`` bindings and the
    True/False ``newline_sep`` arguments lost in the mangling.
    # NOTE(review): PRED/TGT fixture names reconstructed — confirm upstream.
    """
    k = ["""rouge1""", """rouge2""", """rougeL"""]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """Single-sentence inputs must score identically regardless of newline_sep.

    Fixes: binds the two fixture lists to real names and supplies the
    True/False ``newline_sep`` arguments lost in the mangling.
    """
    hypotheses = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    references = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(hypotheses, references, newline_sep=True) == calculate_rouge(hypotheses, references, newline_sep=False)
def test_pegasus_newline():
    """Sentence-splitting (default newline handling) must improve rougeLsum for
    Pegasus-style single-line summaries.

    Fixes: binds pred/tgt and prev_score/new_score (lost in the mangling);
    first call uses ``newline_sep=False`` as the baseline.
    """
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["""rougeLsum"""] , newline_sep=False)["""rougeLsum"""]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
    assert new_score > prev_score
def test_rouge_cli():
    """calculate_rouge_path returns a dict, and a defaultdict when aggregation
    is disabled.

    Fixes: binds ``data_dir``/``metrics`` (lost in the mangling) and restores
    the ``dict``/``defaultdict`` isinstance targets and
    ``bootstrap_aggregation=False``.
    """
    data_dir = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
    metrics = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
    assert isinstance(metrics, dict)
    metrics_no_aggregation = calculate_rouge_path(
        data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=False )
    assert isinstance(metrics_no_aggregation, defaultdict)
| 30
| 1
|
'''simple docstring'''
import os
from math import logaa
def solution(base_exp: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-based line number whose ``base,exponent``
    pair has the greatest value, compared via ``exponent * log10(base)``.

    Fixes: the mangled body mapped the *filename* over the split line
    (``map(UpperCAmelCase__, ...)`` instead of ``map(int, ...)``) and took the
    log of the filename; the file is now also closed via ``with``. The driver
    below called the undefined name ``solution`` — the def is renamed to match.
    """
    from math import log10  # local import: the module-level `logaa` import is a mangled artifact

    largest: float = 0
    result: int = 0
    with open(os.path.join(os.path.dirname(base_exp), base_exp)) as file_handle:
        for i, line in enumerate(file_handle):
            base, exponent = map(int, line.split(","))
            magnitude = exponent * log10(base)
            if magnitude > largest:
                largest = magnitude
                result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
| 30
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( PretrainedConfig):
    """Speech2Text model configuration.

    Fixes: every ``__init__`` parameter was mangled to the same name
    ``lowercase_`` (a SyntaxError), the three class attributes were all
    assigned to ``UpperCamelCase__`` (each overwriting the previous), and the
    base class was the undefined ``_UpperCAmelCase`` (``PretrainedConfig`` is
    imported at the top of this file). Parameter names reconstructed from the
    canonical Speech2TextConfig — the defaults match position for position.
    """

    model_type = """speech_to_text"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
                f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
                f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 30
| 1
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    """Return True iff the undirected graph (adjacency-list dict keyed 0..n-1)
    is bipartite, by 2-coloring it with DFS.

    Fixes: the mangling dropped every binding (``visited``/``color`` were
    assigned to ``lowercase_`` and then referenced by their real names) and the
    recursive call passed the graph instead of the neighbour. The def is
    renamed to ``check_bipartite_dfs``, the name the driver below calls.
    (Broken ``Optional[Any]`` annotations dropped: ``Optional`` is not
    imported in this script.)
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # color v with c, then push the opposite color to unvisited neighbours
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # an edge whose endpoints share a color breaks bipartiteness
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
# Fix: the dict was mangled to `_lowercase` (with an unimported `List[str]`
# annotation) while the print call referenced `graph` — name restored.
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 30
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """Builds tiny Timesformer configs and inputs for the test suite below.

    Fixes: the class is renamed to ``TimesformerModelTester`` — the name the
    test class's ``setUp`` instantiates — and the ``__init__`` parameters,
    which were all mangled to the same name ``lowercase_`` (a SyntaxError),
    are reconstructed; the mangled defaults match position for position.
    Method names are restored to the ones the test class calls
    (``prepare_config_and_inputs``, ``create_and_check_model``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for Timesformer models.

    Fixes: the base-class list was ``(_UpperCAmelCase, _UpperCAmelCase, ...)``
    — the same (undefined) name twice, which is a TypeError; the mixins are
    restored from this file's imports. The four ``UpperCamelCase__ = False``
    attributes (each overwriting the previous) are restored to the flags the
    mixin reads, all test methods get distinct names again (they were all
    ``SCREAMING_SNAKE_CASE_``, so only the last survived), and the locals
    destroyed by the mangling are rebound.
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers )

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers )

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class) )
                self.assertEqual(out_len + 1 , len(outputs) )

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states) , expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """Download and load the spaghetti-eating sample video as a list of frames.

    Fixes: ``np.load``/``list`` were called on the undefined placeholder
    ``UpperCAmelCase__`` instead of the downloaded file / loaded array, and the
    def is renamed to ``prepare_video`` — the name the integration test calls.
    (Broken ``-> Optional[int]`` annotation dropped.)
    """
    video_file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Slow integration test: Kinetics-400 classification logits on a real video.

    Fixes: the cached property is restored to ``default_image_processor`` (the
    name the test reads), and the locals destroyed by the mangling
    (model/video/inputs/outputs/expected_*) are rebound.
    """

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
            torch_device )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors="""pt""" ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , expected_shape)

        expected_slice = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 30
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : List[Any] = logging.get_logger(__name__)
# Decoder keys that are expected to be missing after conversion (positional
# embeddings are rebuilt, not copied). Fix: the constant was mangled to
# `_lowercase` while the conversion routine references EXPECTED_MISSING_KEYS.
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    """Map one fairseq MusicGen decoder weight name to its transformers name.

    Fixes: every replacement result was bound to ``lowercase_`` and discarded,
    and the function returned the then-undefined name ``name``; the def is
    renamed to ``rename_keys`` — the name ``rename_state_dict`` calls.
    (Broken ``List[Any]`` annotation replaced: ``List`` is not imported here.)
    """
    if "emb" in name:
        name = name.replace("""emb""" , """model.decoder.embed_tokens""" )
    if "transformer" in name:
        name = name.replace("""transformer""" , """model.decoder""" )
    if "cross_attention" in name:
        name = name.replace("""cross_attention""" , """encoder_attn""" )
    if "linear1" in name:
        name = name.replace("""linear1""" , """fc1""" )
    if "linear2" in name:
        name = name.replace("""linear2""" , """fc2""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """self_attn_layer_norm""" )
    if "norm_cross" in name:
        name = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """final_layer_norm""" )
    if "out_norm" in name:
        name = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
    if "linears" in name:
        name = name.replace("""linears""" , """lm_heads""" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename decoder weights in place and split out the enc-dec projection.

    Returns the renamed decoder state dict plus a separate dict for the
    ``enc_to_dec_proj`` module; fused ``in_proj_weight`` tensors are split
    into q/k/v slices of ``hidden_size`` rows each.

    Fixes: both parameters were mangled to ``UpperCAmelCase__`` (a
    SyntaxError; the caller passes ``hidden_size=`` by keyword), every store
    was bound to ``lowercase_``, and ``state_dict``/``enc_dec_proj_state_dict``
    were returned without ever being defined. The def is renamed to
    ``rename_state_dict`` — the name the conversion routine calls.
    # NOTE(review): the `enc_to_dec_proj.` prefix strip is reconstructed from
    # the later `model.enc_to_dec_proj.load_state_dict(...)` call — confirm.
    """
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> "MusicgenDecoderConfig":
    """Return the MusicgenDecoderConfig matching a named checkpoint size.

    Raises ValueError for anything other than 'small', 'medium' or 'large'.
    Fixes: the parameter was mangled away from ``checkpoint`` (the name the
    body compares), the three size locals were all bound to ``lowercase_``,
    and the def is renamed to ``decoder_config_from_checkpoint`` — the name
    the conversion routine calls.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Dict="cpu" ) -> List[Any]:
    """Convert a fairseq MusicGen checkpoint into a HF MusicgenForConditionalGeneration.

    Pipeline (per the visible calls): load the fairseq model, rename/split its LM
    state dict, assemble T5 text encoder + Encodec audio encoder + Musicgen decoder,
    sanity-check one forward pass, build the processor, then optionally save and/or
    push to the hub.

    NOTE(review): mechanically renamed source — all four parameters share the name
    ``UpperCAmelCase__`` and every local is rebound to ``lowercase_``; names such as
    ``fairseq_model``, ``decoder``, ``missing_keys``, ``unexpected_keys``, ``model``,
    ``input_ids``, ``logits``, ``audio_encoder``, ``pytorch_dump_folder`` and
    ``repo_id`` are the intended originals but are unresolved here.
    """
    lowercase_ : List[str] = MusicGen.get_pretrained(UpperCAmelCase__ , device=UpperCAmelCase__ )
    lowercase_ : Optional[Any] = decoder_config_from_checkpoint(UpperCAmelCase__ )
    lowercase_ : Union[str, Any] = fairseq_model.lm.state_dict()
    lowercase_ , lowercase_ : Optional[int] = rename_state_dict(
        UpperCAmelCase__ , hidden_size=decoder_config.hidden_size )
    lowercase_ : int = TaEncoderModel.from_pretrained("""t5-base""" )
    lowercase_ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
    lowercase_ : Optional[Any] = MusicgenForCausalLM(UpperCAmelCase__ ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    lowercase_ , lowercase_ : Union[str, Any] = decoder.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
    for key in missing_keys.copy():
        if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(UpperCAmelCase__ )
    if len(UpperCAmelCase__ ) > 0:
        raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
    if len(UpperCAmelCase__ ) > 0:
        raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
    # init the composite model
    lowercase_ : Tuple = MusicgenForConditionalGeneration(text_encoder=UpperCAmelCase__ , audio_encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(UpperCAmelCase__ )
    # check we can do a forward pass
    lowercase_ : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    lowercase_ : List[str] = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        lowercase_ : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("""Incorrect shape for logits""" )
    # now construct the processor
    lowercase_ : List[str] = AutoTokenizer.from_pretrained("""t5-base""" )
    lowercase_ : Optional[int] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
    lowercase_ : Optional[int] = MusicgenProcessor(feature_extractor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
    # set the appropriate bos/pad token ids
    lowercase_ : int = 2048
    lowercase_ : Union[str, Any] = 2048
    # set other default generation config params
    lowercase_ : List[str] = int(30 * audio_encoder.config.frame_rate )
    lowercase_ : List[Any] = True
    lowercase_ : Union[str, Any] = 3.0
    if pytorch_dump_folder is not None:
        Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
        logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
        model.save_pretrained(UpperCAmelCase__ )
        processor.save_pretrained(UpperCAmelCase__ )
    if repo_id:
        logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
        model.push_to_hub(UpperCAmelCase__ )
        processor.push_to_hub(UpperCAmelCase__ )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): the parser and parsed args are assigned to ``_lowercase`` by the
    # renaming pass, so the subsequent references to ``parser`` and ``args`` are
    # stale names from the original script.
    _lowercase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )
    _lowercase : Tuple = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 30
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
# Module-level documentation constants for the RegNet model file.
# NOTE(review): each constant was renamed to ``_lowercase`` (previously
# ``logger``, ``_CONFIG_FOR_DOC``, ``_CHECKPOINT_FOR_DOC``,
# ``_EXPECTED_OUTPUT_SHAPE``, ``_IMAGE_CLASS_CHECKPOINT``,
# ``_IMAGE_CLASS_EXPECTED_OUTPUT``, ``REGNET_PRETRAINED_MODEL_ARCHIVE_LIST``),
# so later references to those original names are unresolved.
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
    """Conv + batch-norm + activation building block (RegNet conv layer).

    NOTE(review): mechanically mangled source — ``nn.Convad``/``nn.BatchNormad``
    are presumably ``nn.Conv2d``/``nn.BatchNorm2d``; ``kernel_size`` in the
    padding expression and ``hidden_state`` in ``forward`` are stale names left
    by the renaming pass (locals are all rebound to ``lowercase_``).
    """

    def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
        super().__init__()
        # padding = kernel_size // 2 preserves spatial size for odd kernels
        lowercase_ : List[Any] = nn.Convad(
            lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
        lowercase_ : str = nn.BatchNormad(lowercase_ )
        lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
        # conv -> norm -> activation; each rebinding of ``lowercase_`` feeds the next call
        lowercase_ : Dict = self.convolution(lowercase_ )
        lowercase_ : str = self.normalization(lowercase_ )
        lowercase_ : Optional[Any] = self.activation(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """RegNet stem: a single stride-2 conv layer preceded by a channel-count check."""

    def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
        super().__init__()
        lowercase_ : str = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        lowercase_ : Any = config.num_channels

    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
        # NOTE(review): ``pixel_values``/``num_channels``/``hidden_state`` are stale
        # names from the renaming pass; locals are all bound as ``lowercase_``.
        lowercase_ : List[str] = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
        lowercase_ : Any = self.embedder(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """1x1 conv + batch-norm projection used to match residual branch dimensions."""

    def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
        super().__init__()
        # NOTE(review): ``nn.Convad``/``nn.BatchNormad`` are presumably mangled
        # ``nn.Conv2d``/``nn.BatchNorm2d`` — verify against upstream RegNet.
        lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
        lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
        # conv then norm; ``hidden_state`` is a stale name from the renaming pass
        lowercase_ : Tuple = self.convolution(lowercase_ )
        lowercase_ : str = self.normalization(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """Squeeze-and-excitation channel attention: global pool, two 1x1 convs, sigmoid gate."""

    def __init__( self : str , lowercase_ : int , lowercase_ : int ):
        super().__init__()
        lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
        lowercase_ : int = nn.Sequential(
            nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
        # b c h w -> b c 1 1
        lowercase_ : List[str] = self.pooler(lowercase_ )
        lowercase_ : Optional[int] = self.attention(lowercase_ )
        # scale input by the learned per-channel weights (``hidden_state``/``attention``
        # are stale names left by the renaming pass)
        lowercase_ : Any = hidden_state * attention
        return hidden_state
class __magic_name__ ( nn.Module):
    """RegNet "X" residual layer: 1x1 -> grouped 3x3 -> 1x1 convs, plus a shortcut."""

    def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
        super().__init__()
        # a projection shortcut is only needed when shape changes
        lowercase_ : List[Any] = in_channels != out_channels or stride != 1
        lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
        lowercase_ : Dict = (
            RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
        )
        lowercase_ : List[Any] = nn.Sequential(
            RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
        lowercase_ : int = ACTaFN[config.hidden_act]

    def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
        # residual block: layer(x) + shortcut(residual), then final activation
        # (``hidden_state``/``residual`` are stale names from the renaming pass)
        lowercase_ : Any = hidden_state
        lowercase_ : Union[str, Any] = self.layer(lowercase_ )
        lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
        hidden_state += residual
        lowercase_ : str = self.activation(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """RegNet "Y" residual layer: like the X layer but with a squeeze-excite block inserted."""

    def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
        super().__init__()
        # a projection shortcut is only needed when shape changes
        lowercase_ : str = in_channels != out_channels or stride != 1
        lowercase_ : int = max(1 , out_channels // config.groups_width )
        lowercase_ : int = (
            RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
        )
        # SE layer reduces channels by 4x inside the attention bottleneck
        lowercase_ : Union[str, Any] = nn.Sequential(
            RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
        lowercase_ : Optional[int] = ACTaFN[config.hidden_act]

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
        # residual block: layer(x) + shortcut(residual), then final activation
        # (``hidden_state``/``residual`` are stale names from the renaming pass)
        lowercase_ : Optional[int] = hidden_state
        lowercase_ : str = self.layer(lowercase_ )
        lowercase_ : int = self.shortcut(lowercase_ )
        hidden_state += residual
        lowercase_ : Optional[Any] = self.activation(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """A stack of RegNet layers; the first layer downsamples with the given stride."""

    def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
        super().__init__()
        # choose X vs Y layer family from the config
        lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
        lowercase_ : str = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
        # ``hidden_state`` is a stale name left by the renaming pass
        lowercase_ : Tuple = self.layers(lowercase_ )
        return hidden_state
class __magic_name__ ( nn.Module):
    """Sequence of RegNet stages; optionally collects per-stage hidden states."""

    def __init__( self : Dict , lowercase_ : RegNetConfig ):
        super().__init__()
        lowercase_ : Optional[Any] = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        # pair consecutive hidden sizes to build the remaining stages
        lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
            self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )

    def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
        # NOTE(review): ``output_hidden_states``/``hidden_states``/``hidden_state``/
        # ``return_dict`` are stale names from the renaming pass.
        lowercase_ : Tuple = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
            lowercase_ : Dict = stage_module(lowercase_ )
        if output_hidden_states:
            lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
    """PreTrainedModel base for RegNet: config class, weight init, checkpointing flag.

    NOTE(review): the four class attributes below were all renamed to
    ``UpperCamelCase__`` (originally ``config_class``, ``base_model_prefix``,
    ``main_input_name``, ``supports_gradient_checkpointing``), so each
    assignment shadows the previous one here.
    """

    UpperCamelCase__ = RegNetConfig
    UpperCamelCase__ = '''regnet'''
    UpperCamelCase__ = '''pixel_values'''
    UpperCamelCase__ = True

    def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
        # Kaiming init for convs, constant init for norm layers
        # (``module`` is a stale name from the renaming pass)
        if isinstance(lowercase_ , nn.Convad ):
            nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
        elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
        # toggle the gradient-checkpointing flag on encoder modules
        # (``value`` is a stale name from the renaming pass)
        if isinstance(lowercase_ , lowercase_ ):
            lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
    """RegNet backbone: embedder -> encoder -> adaptive average pooling."""

    def __init__( self : Any , lowercase_ : Any ):
        super().__init__(lowercase_ )
        lowercase_ : List[str] = config
        lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
        lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
        # NOTE(review): ``nn.AdaptiveAvgPoolad`` is presumably a mangled
        # ``nn.AdaptiveAvgPool2d`` — verify against upstream.
        lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
        # resolve output flags from config defaults (``output_hidden_states``/
        # ``return_dict`` etc. are stale names from the renaming pass)
        lowercase_ : List[Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        lowercase_ : str = self.embedder(lowercase_ )
        lowercase_ : Optional[Any] = self.encoder(
            lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
        lowercase_ : List[Any] = encoder_outputs[0]
        lowercase_ : str = self.pooler(lowercase_ )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
    """RegNet backbone + flatten/linear classification head with standard HF loss routing."""

    def __init__( self : Dict , lowercase_ : str ):
        super().__init__(lowercase_ )
        lowercase_ : Any = config.num_labels
        lowercase_ : List[str] = RegNetModel(lowercase_ )
        # classification head
        lowercase_ : Any = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
        # NOTE(review): ``return_dict``/``outputs``/``logits``/``labels``/``loss``
        # below are stale names from the renaming pass; locals are all ``lowercase_``.
        lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
        lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
        lowercase_ : List[Any] = self.classifier(lowercase_ )
        lowercase_ : Optional[int] = None
        if labels is not None:
            # infer problem type once from label dtype / num_labels, then pick the loss
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    lowercase_ : Optional[int] = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowercase_ : str = """single_label_classification"""
                else:
                    lowercase_ : str = """multi_label_classification"""
            if self.config.problem_type == "regression":
                lowercase_ : str = MSELoss()
                if self.num_labels == 1:
                    lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
            elif self.config.problem_type == "single_label_classification":
                lowercase_ : Optional[int] = CrossEntropyLoss()
                lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                lowercase_ : Dict = BCEWithLogitsLoss()
                lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
        if not return_dict:
            lowercase_ : Tuple = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
| 30
| 1
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __magic_name__ :
    """Helper that builds tiny Llama configs/inputs and runs create-and-check routines.

    NOTE(review): mechanically renamed source — every parameter is ``lowercase_``
    and every attribute/local rebinds ``lowercase_``; right-hand-side names such
    as ``parent``, ``batch_size``, ``input_ids`` etc. are the intended originals
    but are unresolved here.
    """

    def __init__( self : Tuple , lowercase_ : Dict , lowercase_ : str=13 , lowercase_ : int=7 , lowercase_ : Union[str, Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=False , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=99 , lowercase_ : Optional[int]=32 , lowercase_ : Any=5 , lowercase_ : List[str]=4 , lowercase_ : Any=37 , lowercase_ : str="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[Any]=512 , lowercase_ : Union[str, Any]=16 , lowercase_ : Any=2 , lowercase_ : int=0.02 , lowercase_ : Tuple=3 , lowercase_ : str=4 , lowercase_ : int=None , ):
        # store all hyper-parameters used by the prepare_* helpers below
        lowercase_ : Union[str, Any] = parent
        lowercase_ : Union[str, Any] = batch_size
        lowercase_ : Optional[Any] = seq_length
        lowercase_ : Union[str, Any] = is_training
        lowercase_ : List[Any] = use_input_mask
        lowercase_ : Tuple = use_token_type_ids
        lowercase_ : int = use_labels
        lowercase_ : Union[str, Any] = vocab_size
        lowercase_ : List[str] = hidden_size
        lowercase_ : Tuple = num_hidden_layers
        lowercase_ : Optional[int] = num_attention_heads
        lowercase_ : Dict = intermediate_size
        lowercase_ : Optional[int] = hidden_act
        lowercase_ : int = hidden_dropout_prob
        lowercase_ : str = attention_probs_dropout_prob
        lowercase_ : Any = max_position_embeddings
        lowercase_ : List[Any] = type_vocab_size
        lowercase_ : Any = type_sequence_label_size
        lowercase_ : Tuple = initializer_range
        lowercase_ : int = num_labels
        lowercase_ : Union[str, Any] = num_choices
        lowercase_ : Optional[Any] = scope

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # build random ids/mask/label tensors for a tiny forward pass
        lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ : Tuple = None
        if self.use_input_mask:
            lowercase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase_ : Optional[Any] = None
        if self.use_token_type_ids:
            lowercase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase_ : Optional[int] = None
        lowercase_ : str = None
        lowercase_ : Optional[Any] = None
        if self.use_labels:
            lowercase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
        lowercase_ : Any = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # tiny LlamaConfig mirroring the stored hyper-parameters
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )

    def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] ):
        # create_and_check_model: forward pass shape check
        lowercase_ : Optional[Any] = LlamaModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ )
        lowercase_ : int = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Tuple , ):
        # create_and_check_model_as_decoder: cross-attention variants
        lowercase_ : int = True
        lowercase_ : Tuple = LlamaModel(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Optional[int] = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
        lowercase_ : int = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
        lowercase_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , ):
        # create_and_check_for_causal_lm: logits shape check with labels
        lowercase_ : Dict = LlamaForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[Any] , ):
        # create_and_check_decoder_model_past_large_inputs: with/without cache must agree
        lowercase_ : str = True
        lowercase_ : List[str] = True
        lowercase_ : Union[str, Any] = LlamaForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        # first forward pass
        lowercase_ : Optional[int] = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
        lowercase_ : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowercase_ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase_ : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowercase_ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowercase_ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
        lowercase_ : Union[str, Any] = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )["""hidden_states"""][0]
        lowercase_ : List[Any] = model(
            lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )["""hidden_states"""][0]
        # select random slice
        lowercase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowercase_ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowercase_ : str = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # prepare_config_and_inputs_for_common: unpack tuple into config + inputs dict
        lowercase_ : Tuple = self.prepare_config_and_inputs()
        (
            (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) ,
        ) : str = config_and_inputs
        lowercase_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    """Common + generation + pipeline test suite for Llama models.

    NOTE(review): class attributes below were all renamed to ``UpperCamelCase__``
    (originally ``all_model_classes``, ``all_generative_model_classes``,
    ``pipeline_model_mapping``, ``test_headmasking``, ``test_pruning``), so each
    assignment shadows the previous one here.
    """

    UpperCamelCase__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    UpperCamelCase__ = (LlamaForCausalLM,) if is_torch_available() else ()
    UpperCamelCase__ = (
        {
            '''feature-extraction''': LlamaModel,
            '''text-classification''': LlamaForSequenceClassification,
            '''text-generation''': LlamaForCausalLM,
            '''zero-shot''': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # setUp: instantiate the model tester and the config tester
        lowercase_ : str = LlamaModelTester(self )
        lowercase_ : Dict = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # test_config
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        # test_model
        lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        # test_model_various_embeddings: rerun with each position-embedding type
        lowercase_ : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowercase_ : List[Any] = type
            self.model_tester.create_and_check_model(*lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # test_llama_sequence_classification_model (default problem type)
        lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Union[str, Any] = 3
        lowercase_ : List[str] = input_dict["""input_ids"""]
        lowercase_ : Any = input_ids.ne(1 ).to(lowercase_ )
        lowercase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowercase_ : List[Any] = LlamaForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Any = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # test_llama_sequence_classification_model_for_single_label
        lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Dict = 3
        lowercase_ : Tuple = """single_label_classification"""
        lowercase_ : str = input_dict["""input_ids"""]
        lowercase_ : List[str] = input_ids.ne(1 ).to(lowercase_ )
        lowercase_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowercase_ : str = LlamaForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # test_llama_sequence_classification_model_for_multi_label (float labels)
        lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Tuple = 3
        lowercase_ : Tuple = """multi_label_classification"""
        lowercase_ : Tuple = input_dict["""input_ids"""]
        lowercase_ : List[str] = input_ids.ne(1 ).to(lowercase_ )
        lowercase_ : List[str] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowercase_ : str = LlamaForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase_ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Optional[Any] ):
        # test_model_rope_scaling: compare original vs RoPE-scaled model outputs
        lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Union[str, Any] = ids_tensor([1, 10] , config.vocab_size )
        lowercase_ : Any = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase_ : str = LlamaModel(lowercase_ )
        original_model.to(lowercase_ )
        original_model.eval()
        lowercase_ : Any = original_model(lowercase_ ).last_hidden_state
        lowercase_ : Dict = original_model(lowercase_ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase_ : str = {"""type""": scaling_type, """factor""": 10.0}
        lowercase_ : List[str] = LlamaModel(lowercase_ )
        scaled_model.to(lowercase_ )
        scaled_model.eval()
        lowercase_ : Optional[int] = scaled_model(lowercase_ ).last_hidden_state
        lowercase_ : List[str] = scaled_model(lowercase_ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
@require_torch
class __magic_name__ ( unittest.TestCase):
    @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        """Regression check of Llama-2-7b logits against hard-coded references (currently skipped).

        NOTE(review): ``input_ids``/``model``/``out`` are stale names from the
        renaming pass; all locals are bound as ``lowercase_``.
        """
        lowercase_ : Optional[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        lowercase_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
        lowercase_ : Optional[Any] = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        lowercase_ : List[Any] = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
        torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase_ : Tuple = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase_ : Any = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
lowercase_ : int = model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
lowercase_ : Tuple = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ : List[Any] = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase_ : Any = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
lowercase_ : Union[str, Any] = model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
lowercase_ : Tuple = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ : Tuple = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
lowercase_ : List[str] = model(torch.tensor(lowercase_ ) )
lowercase_ : int = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowercase_ : Tuple = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : int = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
lowercase_ : Optional[Any] = """Simply put, the theory of relativity states that """
lowercase_ : Dict = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
lowercase_ : Any = tokenizer.encode(lowercase_ , return_tensors="""pt""" )
lowercase_ : int = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=lowercase_ )
# greedy generation outputs
lowercase_ : List[str] = model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ )
lowercase_ : Any = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
| 30
|
"""Lazy import scaffolding for the FocalNet model (standard HF pattern)."""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names it exports; consumed by _LazyModule below.
# (Fix: the original bound this dict to a throwaway name but later passed the
# undefined name `_import_structure` to _LazyModule — a NameError on import.)
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: without it the modeling classes are simply not exported.
    pass
else:
    # (Fix: the original assigned this list to a fresh module-level name instead
    # of registering the modeling submodule in the import structure.)
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # (Fix: install the lazy proxy into sys.modules so attribute access on this
    # module triggers the lazy imports; the original only bound it to a name.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
    """Unit tests for BarkProcessor: save/load round-trips, voice presets, tokenizer parity.

    NOTE(review): obfuscated source — setUp binds every fixture to `lowercase_`,
    yet the methods read `self.checkpoint`, `self.tmpdirname`, `self.voice_preset`,
    `self.input_string`, `self.speaker_embeddings_dict_path` and
    `self.speaker_embeddings_directory`; restore the attribute assignments
    before running.
    """
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        """setUp: fixture constants (checkpoint id, temp dir, preset name, strings)."""
        lowercase_ : List[Any] = """ylacombe/bark-small"""
        lowercase_ : List[str] = tempfile.mkdtemp()
        lowercase_ : Tuple = """en_speaker_1"""
        lowercase_ : Union[str, Any] = """This is a test string"""
        lowercase_ : int = """speaker_embeddings_path.json"""
        lowercase_ : Any = """speaker_embeddings"""
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
        """Load the tokenizer for the test checkpoint, forwarding any kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """tearDown: remove the temporary directory."""
        shutil.rmtree(self.tmpdirname )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        """Saving then reloading a processor must preserve the tokenizer vocab."""
        lowercase_ : Any = self.get_tokenizer()
        lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        """Round-trip with speaker embeddings and extra special tokens (hub download)."""
        lowercase_ : Any = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        """Voice presets: accepted as an in-memory dict, a local .npz file, or a hub name."""
        lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        lowercase_ : Optional[int] = 35
        lowercase_ : int = 2
        lowercase_ : Union[str, Any] = 8
        lowercase_ : Union[str, Any] = {
            """semantic_prompt""": np.ones(lowercase_ ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : Dict = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(lowercase_ , **lowercase_ )
        lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : List[Any] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        """Processor text output must equal direct tokenizer output (padded to 256)."""
        lowercase_ : List[str] = self.get_tokenizer()
        lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
        lowercase_ : Any = processor(text=self.input_string )
        lowercase_ : List[str] = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( dataset_size , input_in_memory_max_size , monkeypatch ):
    """Check ``is_small_dataset`` against ``datasets.config.IN_MEMORY_MAX_SIZE``.

    Fixes two defects in the obfuscated original:
    * all three parameters were named ``UpperCAmelCase__`` — duplicate argument
      names are a SyntaxError — while the body read ``monkeypatch``,
      ``dataset_size`` and ``input_in_memory_max_size``; pytest also needs
      those exact names to inject the parametrized values and the fixture;
    * the ``-> Any`` return annotation referenced a name never imported in this
      file (NameError at definition time); dropped — the test returns None.
    """
    # "default" means: leave the library default in place instead of patching it.
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset counts as "small" only when both sizes are truthy and it fits the cap.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
| 30
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """Fast tests for ShapEImgaImgPipeline built from tiny dummy components.

    NOTE(review): obfuscated source — all the properties below share the name
    `SCREAMING_SNAKE_CASE_` (later defs shadow earlier ones) and every local is
    `lowercase_`, while bodies read names like `self.text_embedder_hidden_size`,
    `pipe`, `inputs`, `images`. Treat identifiers as placeholders to restore,
    not runnable code.
    """
    UpperCamelCase__ = ShapEImgaImgPipeline
    UpperCamelCase__ = ['''image''']
    UpperCamelCase__ = ['''image''']
    UpperCamelCase__ = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    UpperCamelCase__ = False
    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # presumably text_embedder_hidden_size — TODO confirm original name
        return 32
    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # presumably time_input_dim — TODO confirm original name
        return 32
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # presumably time_embed_dim — TODO confirm original name
        return self.time_input_dim * 4
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # presumably renderer_dim — TODO confirm original name
        return 8
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        """Tiny CLIP vision tower used as the dummy image encoder (seeded)."""
        torch.manual_seed(0 )
        lowercase_ : Tuple = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        lowercase_ : Tuple = CLIPVisionModel(lowercase_ )
        return model
    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        """CLIP image processor matching the dummy encoder's preprocessing."""
        lowercase_ : Optional[int] = CLIPImageProcessor(
            crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
        return image_processor
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        """Tiny PriorTransformer (seeded)."""
        torch.manual_seed(0 )
        lowercase_ : Optional[Any] = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 16,
            """embedding_dim""": self.time_input_dim,
            """num_embeddings""": 32,
            """embedding_proj_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """num_layers""": 1,
            """clip_embed_dim""": self.time_input_dim * 2,
            """additional_embeddings""": 0,
            """time_embed_act_fn""": """gelu""",
            """norm_in_type""": """layer""",
            """embedding_proj_norm_type""": """layer""",
            """encoder_hid_proj_type""": None,
            """added_emb_type""": None,
        }
        lowercase_ : Union[str, Any] = PriorTransformer(**lowercase_ )
        return model
    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        """Tiny ShapERenderer (seeded)."""
        torch.manual_seed(0 )
        lowercase_ : str = {
            """param_shapes""": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            """d_latent""": self.time_input_dim,
            """d_hidden""": self.renderer_dim,
            """n_output""": 12,
            """background""": (
                0.1,
                0.1,
                0.1,
            ),
        }
        lowercase_ : int = ShapERenderer(**lowercase_ )
        return model
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        """Assemble the pipeline components dict from the dummy parts above."""
        lowercase_ : List[str] = self.dummy_prior
        lowercase_ : Union[str, Any] = self.dummy_image_encoder
        lowercase_ : Union[str, Any] = self.dummy_image_processor
        lowercase_ : List[Any] = self.dummy_renderer
        lowercase_ : List[str] = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowercase_ , clip_sample=lowercase_ , clip_sample_range=1.0 , )
        lowercase_ : int = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """image_processor""": image_processor,
            """renderer""": renderer,
            """scheduler""": scheduler,
        }
        return components
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : List[str]=0 ):
        """Build pipeline call kwargs (random input image, seeded generator).

        NOTE(review): the two parameters share the name `lowercase_` — duplicate
        argument names are a SyntaxError; originally presumably (device, seed=0).
        """
        lowercase_ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
        if str(lowercase_ ).startswith("""mps""" ):
            lowercase_ : int = torch.manual_seed(lowercase_ )
        else:
            lowercase_ : Dict = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
        lowercase_ : int = {
            """image""": input_image,
            """generator""": generator,
            """num_inference_steps""": 1,
            """frame_size""": 32,
            """output_type""": """np""",
        }
        return inputs
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        """Smoke test: one inference step on CPU, compare a pinned output slice."""
        lowercase_ : Optional[Any] = """cpu"""
        lowercase_ : Union[str, Any] = self.get_dummy_components()
        lowercase_ : Optional[Any] = self.pipeline_class(**lowercase_ )
        lowercase_ : Tuple = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : int = pipe(**self.get_dummy_inputs(lowercase_ ) )
        lowercase_ : Union[str, Any] = output.images[0]
        lowercase_ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        lowercase_ : Any = np.array(
            [
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """Batch-of-2 output must match single-sample output (relaxed on CPU)."""
        lowercase_ : Union[str, Any] = torch_device == """cpu"""
        lowercase_ : int = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=lowercase_ , relax_max_difference=lowercase_ , )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        """num_images_per_prompt must multiply the number of returned images."""
        lowercase_ : Optional[int] = self.get_dummy_components()
        lowercase_ : Any = self.pipeline_class(**lowercase_ )
        lowercase_ : Optional[int] = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : List[str] = 1
        lowercase_ : Optional[Any] = 2
        lowercase_ : Optional[Any] = self.get_dummy_inputs(lowercase_ )
        for key in inputs.keys():
            if key in self.batch_params:
                lowercase_ : Tuple = batch_size * [inputs[key]]
        lowercase_ : List[Any] = pipe(**lowercase_ , num_images_per_prompt=lowercase_ )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
    """Slow GPU integration test for the released shap-e-img2img pipeline."""
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        """Run img2img on the corgi test image and compare to a stored .npy output.

        NOTE(review): obfuscated locals — `pipe`, `images` and the expected
        array are read but bound as `lowercase_`; restore before running.
        """
        lowercase_ : Tuple = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        lowercase_ : str = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        lowercase_ : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
        lowercase_ : List[Any] = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : Optional[Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
        lowercase_ : List[Any] = pipe(
            lowercase_ , generator=lowercase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(lowercase_ , lowercase_ )
| 30
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
    """TF MT5 integration test: pins the small model's LM loss on a tiny example."""
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """Compare google/mt5-small mean negative loss against a pinned value.

        NOTE(review): obfuscated locals — `tokenizer`, `model`, `mtf_score` and
        `EXPECTED_SCORE` are read but bound as `lowercase_`; restore before running.
        """
        lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
        lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
        lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
        lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
        lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
        lowercase_ : Optional[int] = -21.22_81_68
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 30
| 1
|
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_lowercase : Dict = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_lowercase : Dict = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_lowercase : Optional[int] = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_lowercase : List[Any] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_lowercase : Any = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
    """HumanEval-style code_eval metric: executes generated code against test
    cases in a thread pool and reports pass@k.

    NOTE(review): obfuscated source — `_compute`'s signature repeats the name
    `lowercase_` (duplicate argument names are a SyntaxError), and
    `executor.submit(lowercase_ , *lowercase_ )` presumably targeted
    `check_correctness`; verify `estimate_pass_at_k` resolves at runtime.
    """
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        """Return the MetricInfo (features, citation, homepage) for code_eval."""
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
                    """references""": datasets.Value("""string""" ),
                } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[int]=[1, 10, 100] , lowercase_ : List[str]=4 , lowercase_ : Any=3.0 ):
        """Run every candidate against its test case and compute pass@k.

        Refuses to run unless HF_ALLOW_CODE_EVAL=1 (the code executes untrusted
        model output — see _WARNING) and is unsupported on Windows.
        """
        if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError("""This metric is currently not supported on Windows.""" )
        with ThreadPoolExecutor(max_workers=lowercase_ ) as executor:
            lowercase_ : Optional[Any] = []
            lowercase_ : List[str] = Counter()
            lowercase_ : Union[str, Any] = 0
            lowercase_ : List[str] = defaultdict(lowercase_ )
            # Submit one execution job per (task, candidate) pair; the test case
            # is appended to the candidate program before running.
            for task_id, (candidates, test_case) in enumerate(zip(lowercase_ , lowercase_ ) ):
                for candidate in candidates:
                    lowercase_ : Any = candidate + """\n""" + test_case
                    lowercase_ : Dict = (test_program, timeout, task_id, completion_id[task_id])
                    lowercase_ : Union[str, Any] = executor.submit(lowercase_ , *lowercase_ )
                    futures.append(lowercase_ )
                    completion_id[task_id] += 1
                    n_samples += 1
            # Collect results as they finish, grouped by task id.
            for future in as_completed(lowercase_ ):
                lowercase_ : Optional[Any] = future.result()
                results[result["task_id"]].append((result["""completion_id"""], result) )
        # Per task: total candidates and number that passed.
        lowercase_ , lowercase_ : int = [], []
        for result in results.values():
            result.sort()
            lowercase_ : str = [r[1]["""passed"""] for r in result]
            total.append(len(lowercase_ ) )
            correct.append(sum(lowercase_ ) )
        lowercase_ : List[str] = np.array(lowercase_ )
        lowercase_ : List[Any] = np.array(lowercase_ )
        lowercase_ : Optional[int] = k
        # Only report pass@k for k values no larger than every task's sample count.
        lowercase_ : List[Any] = {f'''pass@{k}''': estimate_pass_at_k(lowercase_ , lowercase_ , lowercase_ ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def lowerCamelCase ( num_samples , num_correct , k ):
    """Unbiased estimator of pass@k from the Codex paper (Chen et al., 2021).

    Fixes the obfuscated original, whose three parameters were all named
    ``UpperCAmelCase__`` — duplicate argument names are a SyntaxError — while
    the body needed three distinct values.

    Args:
        num_samples: total candidates per task — a single int shared by all
            tasks, or a per-task sequence.
        num_correct: per-task counts of passing candidates.
        k: the k in pass@k.

    Returns:
        np.ndarray of pass@k estimates, one per task.
    """
    def estimator(n: int , c: int , k: int ) -> float:
        """1 - C(n-c, k)/C(n, k), computed stably as a running product."""
        if n - c < k:
            # Fewer failures than k: any size-k draw must contain a success.
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        # One sample count shared by every task.
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
estimate_pass_at_k = lowerCamelCase  # the metric class above calls it by this name
| 30
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( ode_func: Callable , ya: float , xa: float , step_size: float , x_end: float ) -> np.ndarray:
    """Solve y' = ode_func(x, y) with Heun's method (modified Euler).

    Fixes the obfuscated original, whose five parameters were all named
    ``UpperCAmelCase__`` — duplicate argument names are a SyntaxError — while
    the body read ``ode_func``, ``ya``, ``xa``, ``step_size`` and ``x_end``.

    Args:
        ode_func: right-hand side f(x, y).
        ya: initial value y(xa).
        xa: initial x.
        step_size: positive increment in x per step.
        x_end: final x; the grid has ceil((x_end - xa) / step_size) steps.

    Returns:
        np.ndarray of y values at each grid point, starting with ya.

    >>> float(lowerCamelCase(lambda x, y: 1.0, 0.0, 0.0, 0.25, 1.0)[-1])
    1.0
    """
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # Predictor: explicit Euler step.
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        # Corrector: trapezoidal average of the slopes at both interval ends.
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        x += step_size
    return y
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 30
| 1
|
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : int , lowercase_ : int = 13 , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 3 , lowercase_ : int = 3 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : int = 128 , lowercase_ : str=[16, 32, 64, 128] , lowercase_ : int = 7 , lowercase_ : int = 4 , lowercase_ : int = 37 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 10 , lowercase_ : float = 0.02 , lowercase_ : int = 2 , lowercase_ : int = 1 , lowercase_ : int = 128 , lowercase_ : List[int] = [2, 2, 2, 2] , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
lowercase_ : Union[str, Any] = parent
lowercase_ : Dict = batch_size
lowercase_ : int = image_size
lowercase_ : Optional[int] = patch_size
lowercase_ : int = num_channels
lowercase_ : int = is_training
lowercase_ : Optional[int] = use_labels
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Tuple = num_hidden_layers
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : int = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Dict = encoder_stride
lowercase_ : List[Any] = num_attention_outputs
lowercase_ : Optional[Any] = embed_dim
lowercase_ : Optional[Any] = embed_dim + 1
lowercase_ : List[str] = resolution
lowercase_ : str = depths
lowercase_ : str = hidden_sizes
lowercase_ : int = dim
lowercase_ : Optional[int] = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Union[str, Any] = None
if self.use_labels:
lowercase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : int ):
lowercase_ : List[str] = TFEfficientFormerModel(config=lowercase_ )
lowercase_ : int = model(lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_image_classification(self, config, pixel_values, labels):
    """Run the classification head and check the logits shape (RGB and greyscale)."""
    config.num_labels = self.type_sequence_label_size
    model = TFEfficientFormerForImageClassification(config)
    result = model(pixel_values, labels=labels, training=False)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    # test greyscale images
    config.num_channels = 1
    model = TFEfficientFormerForImageClassification(config)
    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
    result = model(pixel_values, labels=labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) in the shape the common test mixin expects."""
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_tf
class __magic_name__(_UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    """Common model/pipeline test suite for the TF EfficientFormer models.

    Restores the attribute and method names the tester mixins dispatch on
    (``all_model_classes``, ``test_*`` methods) and fixes the
    ``asseretIsInstance`` typo in the hidden-states check.
    """

    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    # Features not supported by this architecture.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                # was: self.asseretIsInstance (typo — method does not exist)
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size]
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # The distillation ("WithTeacher") head takes no labels.
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    """Load the COCO fixture image used by the integration tests.

    Renamed from the mangled ``lowerCamelCase``: the integration tests below
    call ``prepare_img()``. Also fixes the undefined-``image`` return.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class __magic_name__(unittest.TestCase):
    """Slow integration tests against the released EfficientFormer-L1 checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Name restored: the tests below read ``self.default_image_processor``.
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.05_55, 0.48_25, -0.08_52])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.13_12, 0.43_53, -1.04_99])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 30
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter (RBJ Audio EQ Cookbook).

    Fixes the mangled version in which all parameters and all coefficient
    variables shared one name, yielding wrong coefficients.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])  # b2 == b0 for a low-pass
    return filt
def lowerCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter (RBJ Audio EQ Cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos  # == -(1 + _cos)

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])  # b2 == b0 for a high-pass
    return filt
def lowerCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter (RBJ Audio EQ Cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter (RBJ Audio EQ Cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # For an all-pass the a-coefficients are the b-coefficients reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad filter (RBJ Audio EQ Cookbook).

    ``gain_db`` is the boost/cut at the centre frequency, in decibels.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad filter (RBJ Audio EQ Cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Common sub-expressions of the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def lowerCamelCase(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad filter (RBJ Audio EQ Cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Common sub-expressions of the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 30
| 1
|
'''simple docstring'''
def lowerCamelCase(number: int) -> bool:
    """Return True iff *number* equals the sum of its proper divisors.

    e.g. 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are perfect.
    Fix: the body read a global ``number`` instead of its own parameter.
    """
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    # Fix: the result variable and the predicate were referenced under
    # names (``number`` / ``perfect``) that did not exist in this module.
    number = int(input("Enter number: ").strip())
    print(f"""{number} is {"" if lowerCamelCase(number) else "not "}a Perfect Number.""")
| 30
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame.

    Renamed from the mangled ``__magic_name__``: the builder class below
    references ``SparkConfig`` and reads ``self.config.features``.
    """

    # Optional explicit schema; None means infer from the DataFrame.
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    """Return a generator function yielding ``(example_id, row_dict)`` pairs.

    Rows are collected one Spark partition at a time, in ``partition_order``;
    example ids have the form ``"<partition_id>_<row_id>"``.
    Renamed from the mangled ``lowerCamelCase`` — SparkExamplesIterable
    calls ``_generate_iterable_examples``. (The old ``-> str`` annotation
    was wrong and has been dropped.)
    """
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Filter to a single partition to bound driver-side memory.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable backed by a Spark DataFrame, one shard per partition.

    Renamed from the mangled ``__magic_name__``: the class constructs
    instances of itself by the name ``SparkExamplesIterable``.
    """

    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        """Return a copy that iterates the partitions in a shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        """Return a copy restricted to the partitions assigned to one worker."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        # One shard per Spark partition we iterate.
        return len(self.partition_order)
class __magic_name__(datasets.DatasetBuilder):
    """DatasetBuilder that materializes a Spark DataFrame into Arrow/Parquet shards.

    Method names are restored (``_validate_cache_dir`` etc.) because the class
    calls them on ``self``; the mangled revision defined every method under one
    name, so those internal calls could never resolve. Also fixes the
    nonexistent ``uuid.uuida`` call.
    """

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """On multi-node clusters, check the cache dir is on shared storage."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition so that each partition writes roughly one shard."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        """Write shards on the executors; yield per-task (examples, bytes, shards) stats."""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Current shard is full: finalize it and start the next one.
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                # Move shards written to the scratch dir next to their final path.
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size=None,
        num_proc=None,
        **kwargs,
    ):
        """Drive the distributed write, then rename shards to their final names."""
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            num_examples, num_bytes, num_shards, shard_lengths = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda a: _rename_shard(*a)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator"):
        return SparkExamplesIterable(self.df)
| 30
| 1
|
'''simple docstring'''
def lowerCamelCase(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection: print indices of a maximum set of
    mutually compatible activities, comma-separated.

    Assumes the activities are sorted by finish time. Fixes the mangled
    revision, which had two parameters with the same name (a SyntaxError),
    iterated ``range`` over a list, and printed the list itself.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    selected = 0
    print(selected, end=",")

    # Consider rest of the activities: pick each one whose start time is at
    # or after the finish time of the previously selected activity.
    for j in range(n):
        if start[j] >= finish[selected]:
            print(j, end=",")
            selected = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Fix: the demo lists were stored under mangled names and the call
    # targeted the nonexistent ``print_max_activities``.
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    lowerCamelCase(start, finish)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the Bloom model package. Fixes the mangled
# revision: the structure dict was named `_lowercase` while `_LazyModule`
# received the undefined `_import_structure`, the optional-backend branches
# overwrote the dict instead of registering submodules, and the lazy module
# was never installed into `sys.modules`.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Tokenizers backend available: expose the fast tokenizer lazily.
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch backend available: expose the modeling classes lazily.
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader so heavy backends import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    """Tiny Linear -> BatchNorm1d -> Linear network used by the offload tests.

    Fixes the mangled revision, which named the class ``__magic_name__``
    (shadowed by the test case and unusable as ``ModelForTest()``), assigned
    both Linear layers to the same attribute, and called the nonexistent
    ``nn.BatchNormad``. Attribute names match the ``linear1``/``linear2``
    state-dict keys the sibling tests check.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)  # normalizes the 4 hidden features
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        # (batch, 3) -> (batch, 4) -> normalized -> (batch, 5)
        return self.linear2(self.batchnorm(self.linear1(x)))
class __magic_name__ ( unittest.TestCase):
def test_offload_state_dict(self):
    """offload_state_dict writes an index.json plus one .dat file per weight."""
    model = ModelForTest()
    with TemporaryDirectory() as tmp_dir:
        offload_state_dict(tmp_dir, model.state_dict())
        index_file = os.path.join(tmp_dir, "index.json")
        self.assertTrue(os.path.isfile(index_file))
        # TODO: add tests on what is inside the index

        for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
            weight_file = os.path.join(tmp_dir, f"{key}.dat")
            self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded
def test_offload_weight(self):
    """offload_weight round-trips a tensor through a .dat file for several dtypes.

    Fixes the mangled dtype list (``torch.floataa``/``bfloataa`` do not
    exist) and the undefined local names.
    """
    dtypes = [torch.float16, torch.float32, torch.bfloat16]
    for dtype in dtypes:
        weight = torch.randn(2, 3, dtype=dtype)
        with TemporaryDirectory() as tmp_dir:
            index = offload_weight(weight, "weight", tmp_dir, {})
            weight_file = os.path.join(tmp_dir, "weight.dat")
            self.assertTrue(os.path.isfile(weight_file))
            self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
            new_weight = load_offloaded_weight(weight_file, index["weight"])
            self.assertTrue(torch.equal(weight, new_weight))
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Dict = ModelForTest()
lowercase_ : Union[str, Any] = model.state_dict()
lowercase_ : str = {k: v for k, v in state_dict.items() if """linear2""" not in k}
lowercase_ : Optional[Any] = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_ , lowercase_ )
lowercase_ : Tuple = OffloadedWeightsLoader(state_dict=lowercase_ , save_folder=lowercase_ )
# Every key is there with the right value
self.assertEqual(sorted(lowercase_ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase_ , weight_map[key] ) )
lowercase_ : Optional[int] = {k: v for k, v in state_dict.items() if """weight""" in k}
lowercase_ : Tuple = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_ , lowercase_ )
lowercase_ : Optional[Any] = OffloadedWeightsLoader(state_dict=lowercase_ , save_folder=lowercase_ )
# Every key is there with the right value
self.assertEqual(sorted(lowercase_ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase_ , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_ , lowercase_ )
# Duplicates are removed
lowercase_ : Tuple = OffloadedWeightsLoader(state_dict=lowercase_ , save_folder=lowercase_ )
# Every key is there with the right value
self.assertEqual(sorted(lowercase_ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase_ , weight_map[key] ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : str = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
lowercase_ : Any = extract_submodules_state_dict(lowercase_ , ["""a.1""", """a.2"""] )
self.assertDictEqual(lowercase_ , {"""a.1""": 0, """a.2""": 2} )
lowercase_ : Any = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
lowercase_ : List[str] = extract_submodules_state_dict(lowercase_ , ["""a.1""", """a.2"""] )
self.assertDictEqual(lowercase_ , {"""a.1.a""": 0, """a.2.a""": 2} )
| 30
|
"""Vigenère cipher: encrypt/decrypt a message with an alphabetic key.

The previous version gave all four functions the same obfuscated name and
duplicate parameter names (a SyntaxError), so ``main``, ``encrypt_message``,
``decrypt_message`` and ``LETTERS`` were all undefined at their call sites.
"""

# Alphabet used for both key letters and message letters.
_lowercase : str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
LETTERS = _lowercase  # name the cipher functions reference


def main() -> None:
    """Interactively read a message, key and mode, then print the result."""
    message = input("""Enter message: """)
    key = input("""Enter key [alphanumeric]: """)
    mode = input("""Encrypt/Decrypt [e/d]: """)

    if mode.lower().startswith("""e"""):
        mode = """encrypt"""
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("""d"""):
        mode = """decrypt"""
        translated = decrypt_message(key, message)

    print(f'''\n{mode.title()}ed message:''')
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* with *key* using the Vigenère cipher."""
    return translate_message(key, message, """encrypt""")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt *message* with *key* using the Vigenère cipher."""
    return translate_message(key, message, """decrypt""")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of *message* by the corresponding key letter.

    Case is preserved; non-letter characters pass through unchanged and do
    not advance the key position.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            # Wrap around the alphabet.
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Advance (and cycle) the key only on letters.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


# Backward-compatible alias: the obfuscated original left this name bound
# to the last (translate) definition.
lowerCamelCase = translate_message

if __name__ == "__main__":
    main()
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
    # Tests for BarkProcessor: save/load round-trips, speaker-embedding
    # handling, voice presets and tokenizer parity.
    # NOTE(review): setUp binds its values to throwaway ``lowercase_`` locals
    # while the other methods read ``self.checkpoint``, ``self.tmpdirname``,
    # ``self.voice_preset``, ``self.input_string``,
    # ``self.speaker_embeddings_dict_path`` and
    # ``self.speaker_embeddings_directory`` — upstream these are instance
    # attributes; verify before running.
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Fixture values: checkpoint id, temp dir, default preset and text.
        lowercase_ : List[Any] = """ylacombe/bark-small"""
        lowercase_ : List[str] = tempfile.mkdtemp()
        lowercase_ : Tuple = """en_speaker_1"""
        lowercase_ : Union[str, Any] = """This is a test string"""
        lowercase_ : int = """speaker_embeddings_path.json"""
        lowercase_ : Any = """speaker_embeddings"""

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
        # Helper: load the tokenizer for the checkpoint under test.
        return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        # tearDown: remove the temporary directory.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        # save_pretrained / from_pretrained round-trip keeps the vocab intact.
        lowercase_ : Any = self.get_tokenizer()
        lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # Round-trip with speaker embeddings plus extra tokenizer kwargs
        # (custom BOS/EOS tokens must survive reload).
        lowercase_ : Any = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Voice presets can be passed as an in-memory dict, an .npz file path,
        # or a preset name resolved from the hub.
        lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        lowercase_ : Optional[int] = 35
        lowercase_ : int = 2
        lowercase_ : Union[str, Any] = 8
        lowercase_ : Union[str, Any] = {
            """semantic_prompt""": np.ones(lowercase_ ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : Dict = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(lowercase_ , **lowercase_ )
        lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
        lowercase_ : List[Any] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Processor tokenization must equal direct tokenizer output, padded to
        # max_length=256 (squeeze removes the batch dimension for comparison).
        lowercase_ : List[str] = self.get_tokenizer()
        lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
        lowercase_ : Any = processor(text=self.input_string )
        lowercase_ : List[str] = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 1
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCamelCase(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenate consecutive (src, tgt) example pairs into packed
    examples whose tokenized length stays under *max_tokens* on both sides.

    Returns a pair ``(packed_src, packed_tgt)`` of equal-length lists.
    (The previous version had duplicate parameter names — a SyntaxError —
    and bound every intermediate to a throwaway local.)
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    # Robustness: nothing to pack for empty inputs.
    if not sorted_examples:
        return finished_src, finished_tgt
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        # One tokenizer call per candidate; over max_tokens means "no longer fits".
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup: flush the in-progress example
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


# Name the sibling packing function calls.
pack_examples = lowerCamelCase
def lowerCamelCase(tok, data_dir, max_tokens, save_path):
    """Pack the train split of *data_dir* with tokenizer *tok* under
    *max_tokens* and write it to *save_path*; val/test are copied unchanged.

    (The previous version had duplicate parameter names — a SyntaxError —
    and leaked file handles via ``.open("w").write(...)``.)
    """
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        # write_text opens and closes the file properly.
        (save_path / f"{split}.source").write_text("\n".join(packed_src))
        (save_path / f"{split}.target").write_text("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


# Name the CLI entry point calls.
pack_data_dir = lowerCamelCase
def lowerCamelCase():
    """CLI entry point: parse arguments, load the tokenizer, pack the data.

    (The previous version passed undefined names as ``type=`` to argparse
    and the ``__main__`` guard called an undefined ``packer_cli``.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


# Backward-compatible alias so the guard below resolves.
packer_cli = lowerCamelCase

if __name__ == "__main__":
    packer_cli()
| 30
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
    # Task template describing an image-classification dataset layout:
    # an "image" input column and a ClassLabel "labels" column.
    # NOTE(review): the five class attributes all carry the same obfuscated
    # name, so later assignments shadow earlier ones; upstream these are
    # task / input_schema / label_schema / image_column / label_column —
    # verify before use. The dataclass decorator presumably reads frozen=True.
    UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
    UpperCamelCase__ = Features({'''image''': Image()})
    UpperCamelCase__ = Features({'''labels''': ClassLabel})
    UpperCamelCase__ = "image"
    UpperCamelCase__ = "labels"

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
        # Validate that the dataset features contain a ClassLabel column under
        # self.label_column, then return a copy of this template whose label
        # schema is aligned with those features.
        # NOTE(review): the body reads ``features`` but the parameter is the
        # obfuscated ``lowercase_``, and the returned ``task_template`` is
        # never bound — compare with the upstream datasets implementation.
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , lowercase_ ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        lowercase_ : List[str] = copy.deepcopy(self )
        lowercase_ : List[str] = self.label_schema.copy()
        lowercase_ : List[Any] = features[self.label_column]
        lowercase_ : Optional[Any] = label_schema
        return task_template

    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Mapping from dataset column names to the task's canonical names.
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 30
| 1
|
"""Ideal gas law (PV = nRT): solve for pressure or volume.

The previous version had duplicate parameter names (a SyntaxError), read an
undefined ``UNIVERSAL_GAS_CONSTANT``, and both functions shared one name so
the first was shadowed.
"""

# Universal gas constant R. Unit - J mol-1 K-1
_lowercase : float = 8.3_1_4_4_6_2
UNIVERSAL_GAS_CONSTANT = _lowercase  # name the functions reference


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return pressure P = nRT / V.

    Raises ValueError for any negative input.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return volume V = nRT / P.

    Raises ValueError for any negative input.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


# Backward-compatible alias: the obfuscated original left this name bound
# to the last definition.
lowerCamelCase = volume_of_gas_system

if __name__ == "__main__":
    from doctest import testmod

    testmod()
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase(class_prompt, class_data_dir, num_class_images):
    """Download up to *num_class_images* images matching *class_prompt* from
    the LAION-400M knn index into ``{class_data_dir}/images``.

    Also writes caption.txt / urls.txt / images.txt manifests. Returns early
    if enough images already exist; failed downloads are skipped silently.
    (The previous version bound every intermediate to a throwaway local, so
    ``client``, ``class_images``, ``count``, ``total`` and ``pbar`` were all
    undefined as written.)
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the candidate budget until we have enough results (10k safety cap).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fb, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fc:
        # Robustness: also stop when the candidate list is exhausted instead
        # of raising IndexError.
        while total < num_class_images and count < len(class_images):
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # validate it decodes
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fb.write(images["url"] + "\n")
                    fc.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


# Conventional name; the __main__ guard at the bottom calls ``retrieve``.
retrieve = lowerCamelCase
def lowerCamelCase():
    """Parse CLI args: --class_prompt, --class_data_dir, --num_class_images.

    (The previous version passed undefined names as ``required=``/``type=``
    and the guard bound the result to a name it never read.)
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


# Conventional alias.
parse_args = lowerCamelCase

if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
| 1
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( _UpperCAmelCase):
    # Test suite for CMStochasticIterativeScheduler (consistency models).
    # NOTE(review): base class ``_UpperCAmelCase`` is presumably the imported
    # SchedulerCommonTest — verify. Both class attributes share one obfuscated
    # name, so the second assignment shadows the first; upstream they are
    # ``scheduler_classes`` and ``num_inference_steps``.
    UpperCamelCase__ = (CMStochasticIterativeScheduler,)
    UpperCamelCase__ = 10

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **lowercase_ : List[str] ):
        # Default scheduler config; kwargs override individual fields.
        # NOTE(review): ``config`` is never bound (the dict goes to a
        # throwaway local), so ``config.update`` cannot resolve as written.
        lowercase_ : List[str] = {
            """num_train_timesteps""": 201,
            """sigma_min""": 0.0_02,
            """sigma_max""": 80.0,
        }
        config.update(**lowercase_ )
        return config

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # Stepping at consecutive timesteps must preserve the sample shape.
        lowercase_ : Optional[int] = 10
        lowercase_ : Any = self.get_scheduler_config()
        lowercase_ : Dict = self.scheduler_classes[0](**lowercase_ )
        scheduler.set_timesteps(lowercase_ )
        lowercase_ : Dict = scheduler.timesteps[0]
        lowercase_ : Optional[int] = scheduler.timesteps[1]
        lowercase_ : Optional[Any] = self.dummy_sample
        lowercase_ : Any = 0.1 * sample
        lowercase_ : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
        lowercase_ : Optional[int] = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Config sweep over num_train_timesteps.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        # Config sweep over clip_denoised.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Full denoising loop with default timesteps; checks summary
        # statistics of the final sample against reference values.
        lowercase_ : List[Any] = self.scheduler_classes[0]
        lowercase_ : Any = self.get_scheduler_config()
        lowercase_ : Dict = scheduler_class(**lowercase_ )
        lowercase_ : Optional[int] = 1
        scheduler.set_timesteps(lowercase_ )
        lowercase_ : Any = scheduler.timesteps
        lowercase_ : Optional[Any] = torch.manual_seed(0 )
        lowercase_ : Optional[int] = self.dummy_model()
        lowercase_ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(lowercase_ ):
            # 1. scale model input
            lowercase_ : int = scheduler.scale_model_input(lowercase_ , lowercase_ )
            # 2. predict noise residual
            lowercase_ : Optional[int] = model(lowercase_ , lowercase_ )
            # 3. predict previous sample x_t-1
            lowercase_ : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
            lowercase_ : str = pred_prev_sample
        lowercase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
        lowercase_ : List[Any] = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
        assert abs(result_mean.item() - 0.25_10 ) < 1E-3

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Full loop with explicitly provided (custom) timesteps [106, 0].
        lowercase_ : int = self.scheduler_classes[0]
        lowercase_ : List[Any] = self.get_scheduler_config()
        lowercase_ : List[str] = scheduler_class(**lowercase_ )
        lowercase_ : List[Any] = [106, 0]
        scheduler.set_timesteps(timesteps=lowercase_ )
        lowercase_ : str = scheduler.timesteps
        lowercase_ : Any = torch.manual_seed(0 )
        lowercase_ : List[Any] = self.dummy_model()
        lowercase_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            lowercase_ : Any = scheduler.scale_model_input(lowercase_ , lowercase_ )
            # 2. predict noise residual
            lowercase_ : List[Any] = model(lowercase_ , lowercase_ )
            # 3. predict previous sample x_t-1
            lowercase_ : Optional[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
            lowercase_ : Dict = pred_prev_sample
        lowercase_ : List[str] = torch.sum(torch.abs(lowercase_ ) )
        lowercase_ : str = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
        assert abs(result_mean.item() - 0.45_27 ) < 1E-3

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Non-descending custom timesteps must be rejected.
        lowercase_ : Union[str, Any] = self.scheduler_classes[0]
        lowercase_ : List[Any] = self.get_scheduler_config()
        lowercase_ : Optional[Any] = scheduler_class(**lowercase_ )
        lowercase_ : List[Any] = [39, 30, 12, 15, 0]
        with self.assertRaises(lowercase_ , msg="""`timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Passing both num_inference_steps and timesteps must be rejected.
        lowercase_ : List[Any] = self.scheduler_classes[0]
        lowercase_ : Optional[Any] = self.get_scheduler_config()
        lowercase_ : Optional[Any] = scheduler_class(**lowercase_ )
        lowercase_ : Union[str, Any] = [39, 30, 12, 1, 0]
        lowercase_ : Optional[Any] = len(lowercase_ )
        with self.assertRaises(lowercase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=lowercase_ , timesteps=lowercase_ )

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Timesteps at/above num_train_timesteps must be rejected.
        # NOTE(review): the msg string uses {...} placeholders without an
        # f-string prefix — looks like a dropped ``f`` upstream.
        lowercase_ : List[str] = self.scheduler_classes[0]
        lowercase_ : str = self.get_scheduler_config()
        lowercase_ : Optional[int] = scheduler_class(**lowercase_ )
        lowercase_ : Dict = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            lowercase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
            scheduler.set_timesteps(timesteps=lowercase_ )
| 30
|
"""In-place slowsort (deliberately inefficient multiply-and-surrender sort).

The previous version had duplicate parameter names (a SyntaxError), recursed
via an undefined ``slowsort`` name, and the swap assigned both values to a
throwaway local instead of writing them back into the sequence.
"""
from __future__ import annotations


def lowerCamelCase(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place.

    Defaults cover the whole sequence. Sorts each half recursively, moves the
    maximum to the end, then slowsorts the remainder.
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    lowerCamelCase(sequence, start, mid)
    lowerCamelCase(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        # Place the larger of the two pivots at the end.
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    lowerCamelCase(sequence, start, end - 1)


# Conventional name for the algorithm (the original recursion referenced it).
slowsort = lowerCamelCase

if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 30
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : str = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class __magic_name__(PretrainedConfig):
    """XGLM model configuration (defaults match facebook/xglm-564M).

    The previous version had duplicate ``__init__`` parameter names (a
    SyntaxError), bound every value to throwaway locals instead of ``self``
    attributes, gave all three class attributes one colliding name, and
    referenced an undefined base class (``PretrainedConfig`` is imported at
    the top of this file).
    """

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 30
|
'''simple docstring'''
# Script: run Stable Diffusion inference on CPU with Intel Extension for
# PyTorch (IPEX) graph/kernel optimizations under bfloat16 autocast.
# NOTE(review): results are assigned to obfuscated ``_lowercase`` names while
# later lines read ``parser`` / ``args`` / ``pipe`` / ``sample`` /
# ``generator`` / ``image`` etc. — the original surely bound those names;
# verify against the upstream example script. ``torch.bfloataa`` looks like
# an obfuscation of ``torch.bfloat16`` and ``encoder_hidden_status`` of
# ``encoder_hidden_states`` — confirm before running.
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# CLI: optionally switch to the DPMSolver scheduler and set inference steps.
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()

# Target device, prompt and model path.
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    _lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    _lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
# Dummy (sample, timestep, encoder hidden states) example used to trace the
# UNet; falls back to optimization without a sample input if tracing fails.
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
    _lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    _lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    _lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
# Fixed seed for reproducible generation; --steps overrides inference steps.
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
    _lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    _lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 1
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowercase : Optional[Any] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowercase : int = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_lowercase : str = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
    """sacrebleu-backed TER (Translation Edit Rate) metric.

    NOTE(review): both methods below carry the obfuscated name
    ``SCREAMING_SNAKE_CASE_`` so the second definition shadows the first in the
    class namespace; the names are kept unchanged to preserve the external
    interface — confirm the intended names (upstream: ``_info`` / ``_compute``).
    """

    def SCREAMING_SNAKE_CASE_ ( self ):
        # Metric info / version guard.  The TER keyword arguments used in the
        # compute method below only exist in sacrebleu >= 1.4.12, so fail early
        # on older installs.
        if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
            raise ImportWarning(
                """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
                """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage="""http://www.cs.umd.edu/~snover/tercom/""" ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
                } ) ,
            codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] ,
            reference_urls=[
                """https://github.com/jhclark/tercom""",
            ] , )

    def SCREAMING_SNAKE_CASE_ (
        self ,
        predictions ,
        references ,
        normalized : bool = False ,
        ignore_punct : bool = False ,
        support_zh_ja_chars : bool = False ,
        case_sensitive : bool = False ,
    ):
        # Compute TER over a corpus.
        # BUG FIX: the original signature reused the name `lowercase_` for every
        # parameter, which is a SyntaxError (duplicate argument names); the
        # parameters are restored to descriptive names matching the metric's
        # documented arguments.
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        # sacrebleu expects one reference stream per reference position, so
        # transpose the per-prediction reference lists.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Any = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase (
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Return the Schur complement ``C - B^T A^{-1} B`` of ``[[A, B], [B^T, C]]``.

    BUG FIX: the original signature declared every parameter with the same name
    ``UpperCAmelCase__`` (a SyntaxError), so the body's references to ``mat_b``,
    ``mat_c`` and ``pseudo_inv`` were unbound; the parameter names are restored.

    Args:
        mat_a: The (n, n) block A; must be invertible unless ``pseudo_inv`` is given.
        mat_b: The (n, m) block B; must have the same number of rows as ``mat_a``.
        mat_c: The (m, m) block C; must have the same number of columns as ``mat_b``.
        pseudo_inv: Optional precomputed (pseudo-)inverse of A.  When ``None``,
            ``np.linalg.inv(mat_a)`` is used.

    Returns:
        The Schur complement as an ndarray.

    Raises:
        ValueError: If the block shapes are incompatible, or A is singular and
            no ``pseudo_inv`` was supplied.
    """
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError as err:
            # Chain the linear-algebra failure so the root cause stays visible.
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" ) from err
    return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase):
    """Unit tests for the Schur-complement helper.

    NOTE(review): the bodies call ``schur_complement``; in this obfuscated file
    the helper is bound under the name ``lowerCamelCase`` — confirm the intended
    module-level alias.  All three methods also share the obfuscated name
    ``SCREAMING_SNAKE_CASE_`` (later definitions shadow earlier ones); names are
    kept to preserve the external interface.
    """

    def SCREAMING_SNAKE_CASE_ ( self ):
        # det([[A, B], [B^T, C]]) must equal det(A) * det(S) where S is the
        # Schur complement of A.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        block_matrix = np.block([[a, b], [b.T, c]] )
        det_block = np.linalg.det(block_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_block , det_a * det_s )

    def SCREAMING_SNAKE_CASE_ ( self ):
        # Incompatible block shapes must be rejected with ValueError.
        # BUG FIX: the obfuscated code passed an ndarray (the last `lowercase_`
        # binding) to assertRaises; the helper raises ValueError on mismatch.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )

    def SCREAMING_SNAKE_CASE_ ( self ):
        # C with a column count that differs from B's must raise ValueError.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
# Script entry point: run the module's doctests, then the unittest suite.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    unittest.main()
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase : List[Any] = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = ["PerceiverFeatureExtractor"]
_lowercase : Optional[int] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
# Standard base64 alphabet (RFC 4648), indexed by 6-bit value.
# NOTE(review): the functions below reference `B64_CHARSET`; confirm this
# obfuscated binding is the intended name.
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes:
    """Encode a bytes-like object to base64 without using the stdlib codec.

    BUG FIX: the obfuscated body checked ``isinstance(data, data)``, called
    ``bin`` on the whole input instead of each byte, and referenced the unbound
    name ``B64_CHARSET``; all three are repaired (the alphabet is kept as a
    local so the function is self-contained).

    Args:
        UpperCAmelCase__: The raw bytes to encode.

    Returns:
        The base64-encoded bytes, padded with ``=`` as required.

    Raises:
        TypeError: If the argument is not a bytes-like object.
    """
    data = UpperCAmelCase__
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data , bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    binary_stream = """""".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"""=""" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""""""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            charset[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes:
    """Decode a base64 string (or ASCII bytes) back to raw bytes.

    BUG FIX: the obfuscated body checked ``isinstance(x, x)``, indexed the
    alphabet with the whole input instead of each character, and referenced the
    unbound name ``B64_CHARSET``; all are repaired (the alphabet is a local so
    the function is self-contained).  The original ``assert``-based validation
    is preserved so callers see the same exception types.

    Args:
        UpperCAmelCase__: Base64 text as ``str`` or ASCII ``bytes``.

    Returns:
        The decoded bytes.

    Raises:
        TypeError: If the argument is neither ``str`` nor ``bytes``.
        ValueError: If a ``bytes`` argument is not valid ASCII/UTF-8.
        AssertionError: On invalid base64 characters or incorrect padding.
    """
    encoded_data = UpperCAmelCase__
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data , str ) and not isinstance(encoded_data , bytes ):
        msg = (
            """argument should be a bytes-like object or ASCII string, """
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("""utf-8""" )
        except UnicodeDecodeError:
            raise ValueError("""base64 encoded data should only contain ASCII characters""" )
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    padding = encoded_data.count("""=""" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in charset for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in charset for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one, dropping the matching filler bits.
        encoded_data = encoded_data[:-padding]
        binary_stream = """""".join(
            bin(charset.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = """""".join(
            bin(charset.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
# Script entry point: run the doctests embedded in this module.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger.
_lowercase : List[str] = logging.get_logger(__name__)
# Map of released checkpoints to their hosted config files.
# NOTE(review): this assignment rebinds the same obfuscated name `_lowercase`
# as the logger above — confirm the intended distinct names against the
# original source.
_lowercase : List[Any] = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class __magic_name__ ( _UpperCAmelCase):
    """Configuration of the InstructBLIP vision encoder (ViT-style tower).

    Defaults correspond to the released InstructBLIP checkpoints.
    """

    UpperCamelCase__ = '''instructblip_vision_model'''

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1E-6,
        attention_dropout=0.0,
        initializer_range=1E-10,
        qkv_bias=True,
        **kwargs,
    ):
        # BUG FIX: the obfuscated signature reused the name `lowercase_` for
        # every parameter (a SyntaxError) and the body bound throwaway locals
        # instead of instance attributes; both are restored (names taken from
        # the attribute references on the original body's right-hand sides).
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        # `from_pretrained`-style constructor: extract the vision sub-config
        # when loading from a combined InstructBlip config.
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("""model_type""" ) == "instructblip":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class __magic_name__ ( _UpperCAmelCase):
    """Configuration of the InstructBLIP Q-Former (BERT-like cross-attention bridge)."""

    UpperCamelCase__ = '''instructblip_qformer'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        # BUG FIX: the obfuscated signature reused `lowercase_` for every
        # parameter (a SyntaxError) and the body bound throwaway locals instead
        # of instance attributes; restored from the original right-hand sides.
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        # `from_pretrained`-style constructor: extract the Q-Former sub-config
        # when loading from a combined InstructBlip config.
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("""model_type""" ) == "instructblip":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class __magic_name__ ( _UpperCAmelCase):
    """Combined InstructBLIP configuration: vision tower + Q-Former + language model.

    NOTE(review): the ``InstructBlipVisionConfig`` / ``InstructBlipQFormerConfig``
    names used below come from the original source; in this obfuscated file the
    sibling config classes are not bound under those names — confirm the
    intended module-level aliases.
    """

    UpperCamelCase__ = '''instructblip'''
    UpperCamelCase__ = True

    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        # BUG FIX: the obfuscated body bound throwaway locals instead of
        # instance attributes; attribute targets restored from the original
        # right-hand-side references (e.g. `self.text_config`,
        # `self.vision_config.hidden_size`).
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends into the vision tower, so its encoder
        # width must track the vision hidden size.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls , vision_config , qformer_config , text_config , **kwargs ):
        # Build a combined config from already-instantiated sub-configs.
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def SCREAMING_SNAKE_CASE_ ( self ):
        # Serialize to a plain dict, expanding the nested sub-configs.
        # BUG FIX: the obfuscated body bound each `.to_dict()` result to a
        # throwaway local; the results must be stored back into `output`.
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 30
|
'''simple docstring'''
import argparse
# Path to the versioned-docs selector script that is rewritten on each release.
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ , js_path = None ) -> None:
    """Rewrite the custom.js version selector for a new release.

    BUG FIX: the obfuscated body referenced the unbound names ``version``,
    ``lines`` and ``index`` and annotated the return as the unimported ``Dict``;
    all are repaired.  Also generalized (backward-compatibly) with an optional
    ``js_path`` so the target file can be overridden, e.g. in tests.

    Args:
        UpperCAmelCase__: The release version string (without the leading "v").
        js_path: Optional path of the JS file to rewrite; defaults to the
            module-level path constant.
    """
    version = UpperCAmelCase__
    if js_path is None:
        # Default to the module-level path constant (resolved lazily).
        js_path = _lowercase
    with open(js_path , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion =""" ):
        index += 1
    lines[index] = F'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {""" ):
        index += 1
    # We go until the end
    while not lines[index].startswith("""}""" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F''' "v{version}": "v{version}",\n'''
    with open(js_path , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
# CLI entry point: `python <script> --version <release>` updates custom.js.
# NOTE(review): the obfuscated bindings below don't line up — `parser`, `args`
# and `update_custom_js` are never bound under those names in this file
# (the parser/args are assigned to `_lowercase` and the updater is named
# `lowerCamelCase`); confirm against the original script.
if __name__ == "__main__":
    _lowercase : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    _lowercase : Dict = parser.parse_args()
    update_custom_js(args.version)
| 30
| 1
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# Global test toggle; currently always False in this module.
_lowercase : Tuple = False

class __magic_name__ ( unittest.TestCase):
    # Placeholder for fast pipeline tests; intentionally empty.
    pass
@nightly
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

    NOTE(review): all three methods share the obfuscated name
    ``SCREAMING_SNAKE_CASE_`` (later definitions shadow earlier ones), and the
    bodies reference names (`pipe`, `generator`, `image`, `new_image`,
    `image_slice`, `expected_slice`) whose bindings were lost to the
    obfuscation (`lowercase_`) — confirm against the original test file.
    """

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Round-trip test: saving and reloading the pipeline must reproduce the
        # same image for the same seed.
        lowercase_ : Union[str, Any] = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : int = """A painting of a squirrel eating a burger """
        lowercase_ : List[Any] = torch.manual_seed(0 )
        lowercase_ : Any = pipe(
            prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowercase_ )
        lowercase_ : Union[str, Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : str = generator.manual_seed(0 )
        lowercase_ : Tuple = pipe(
            prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
        # Reloaded pipeline must be numerically identical to the original.
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        # Full-precision-reduced (fp16) inference against a recorded slice of
        # the expected output image.
        lowercase_ : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
            """shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        lowercase_ : List[str] = """A painting of a squirrel eating a burger """
        lowercase_ : List[str] = torch.manual_seed(0 )
        lowercase_ : Optional[Any] = pipe(
            prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
        lowercase_ : Any = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase_ : str = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        # Loose tolerance: fp16 sampling is only reproducible to ~1e-2.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 30
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
    """Test helper that builds tiny Bit configs/inputs and checks model outputs.

    BUG FIX: the obfuscated ``__init__`` reused the name ``lowercase_`` for
    every parameter (a SyntaxError) and bound throwaway locals instead of
    ``self`` attributes; descriptive names are restored from the body's
    right-hand sides.

    NOTE(review): the remaining methods all share the obfuscated name
    ``SCREAMING_SNAKE_CASE_`` (later definitions shadow earlier ones) while the
    bodies call helpers such as ``self.prepare_config_and_inputs`` /
    ``self.get_config`` by their original names — confirm the intended method
    names before relying on this class.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        # NOTE(review): the list defaults are shared across instances — these
        # test helpers never mutate them, so the pattern is kept as-is.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def SCREAMING_SNAKE_CASE_ ( self ):
        # prepare_config_and_inputs: random pixel values plus optional labels.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE_ ( self ):
        # get_config: a tiny BitConfig driven by the tester's attributes.
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def SCREAMING_SNAKE_CASE_ ( self , config , pixel_values , labels ):
        # create_and_check_model: the base model emits a 32x-downsampled map.
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def SCREAMING_SNAKE_CASE_ ( self , config , pixel_values , labels ):
        # create_and_check_for_image_classification: logits shaped (batch, labels).
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self , config , pixel_values , labels ):
        # create_and_check_backbone: feature maps/channels for the requested
        # stages, and the default-single-stage behavior with out_features=None.
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def SCREAMING_SNAKE_CASE_ ( self ):
        # prepare_config_and_inputs_for_common: pack inputs for the mixin tests.
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Skipped: feed-forward chunking is a transformer-only memory optimization.
        pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
    """Load the COCO fixture image used by the integration tests below."""
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Integration test: run a pretrained BiT classifier on a COCO fixture image.

    Fixes: `outputs` was read but never assigned (NameError at runtime); the
    cached property was renamed away from `default_image_processor` (and then
    shadowed by the test method of the same name), breaking the
    `self.default_image_processor` lookup; and `prepare_img` was referenced
    although that helper is named `lowerCamelCase` in this file.
    """

    @cached_property
    def default_image_processor( self : List[str] ):
        # Only available when the vision extras (PIL etc.) are installed.
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        """Check logits shape and a reference slice for the first checkpoint."""
        # NOTE(review): module-level `lowercase_` is assumed to be the torch
        # device used throughout this file — confirm at file top.
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_)
        image_processor = self.default_image_processor
        image = lowerCamelCase()
        inputs = image_processor(images=image, return_tensors="""pt""" ).to(lowercase_)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
| 30
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.