| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ) -> None:
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
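# Hedged usage sketch for the DiT pipeline above; the checkpoint id and the
# device below are illustrative assumptions, not part of the original code.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # ImageNet label names -> ids
generator = torch.manual_seed(33)
images = pipe(class_labels=class_ids, generator=generator, num_inference_steps=25).images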
| 326
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call)."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
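# Hedged sketch of how the processor above is typically driven; the
# `feature_extractor`, `tokenizer`, and `waveform` objects are assumed to exist.
processor = Speech2TextProcessor(feature_extractor, tokenizer)
# audio features and tokenized labels in a single call (`text` becomes `labels`)
batch = processor(audio=waveform, sampling_rate=16000, text="a transcript")
# older, deprecated equivalent of the `text=` argument:
# with processor.as_target_processor():
#     labels = processor("a transcript").input_ids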
| 409
| 0
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=16 , _lowercase=36 , _lowercase=6 , _lowercase=6 , _lowercase=6 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> List[Any]:
lowercase_ : List[Any] = parent
lowercase_ : Union[str, Any] = batch_size
lowercase_ : Any = seq_length
lowercase_ : List[Any] = is_training
lowercase_ : Any = use_input_mask
lowercase_ : Optional[int] = use_token_type_ids
lowercase_ : Optional[Any] = use_labels
lowercase_ : Optional[int] = vocab_size
lowercase_ : Union[str, Any] = embedding_size
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : str = num_hidden_groups
lowercase_ : int = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : str = hidden_act
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Union[str, Any] = attention_probs_dropout_prob
lowercase_ : List[str] = max_position_embeddings
lowercase_ : Any = type_vocab_size
lowercase_ : Any = type_sequence_label_size
lowercase_ : str = initializer_range
lowercase_ : Optional[Any] = num_labels
lowercase_ : List[str] = num_choices
lowercase_ : Tuple = scope
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Tuple = None
if self.use_input_mask:
lowercase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Tuple = None
if self.use_token_type_ids:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Any = None
lowercase_ : Union[str, Any] = None
lowercase_ : Optional[Any] = None
if self.use_labels:
lowercase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ) -> Any:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
lowercase_ : Dict = AlbertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Optional[int] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
lowercase_ : Dict = model(_lowercase , token_type_ids=_lowercase )
lowercase_ : str = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
lowercase_ : Optional[int] = AlbertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Dict = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , sentence_order_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
lowercase_ : Optional[Any] = AlbertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Dict = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
lowercase_ : Optional[Any] = AlbertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Tuple = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
lowercase_ : Dict = self.num_labels
lowercase_ : Union[str, Any] = AlbertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Optional[int] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
lowercase_ : List[Any] = self.num_labels
lowercase_ : Optional[Any] = AlbertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : List[Any] = self.num_choices
lowercase_ : Tuple = AlbertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : List[Any] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self ) -> List[str]:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> str:
lowercase_ : List[Any] = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
lowercase_ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
lowercase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : List[str] = AlbertModelTester(self )
lowercase_ : str = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def lowerCamelCase__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : Tuple = type
self.model_tester.create_and_check_model(*_lowercase )
@slow
def lowerCamelCase__ ( self ) -> Union[str, Any]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Tuple = AlbertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : List[str] = AlbertModel.from_pretrained('albert-base-v2' )
lowercase_ : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase_ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase_ : List[Any] = model(_lowercase , attention_mask=_lowercase )[0]
lowercase_ : str = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _lowercase )
lowercase_ : Tuple = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
| 7
|
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info of the current repository to `git_log.json`."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setup."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID        : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank     : %i' % params.local_rank)
    logger.info(PREFIX + 'World size     : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node  : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master         : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node     : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU      : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname       : %s' % socket.gethostname())
    # set GPU device
    torch.cuda.set_device(params.local_rank)
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(init_method='env://', backend='nccl')


def set_seed(args):
    """Set the random seed for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
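# The multi-GPU branch of init_gpu_params above expects these environment
# variables; the values here are illustrative for a single 8-GPU node:
#   WORLD_SIZE=8  N_GPU_NODE=8  RANK=0..7  N_NODES=1  NODE_RANK=0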
| 7
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords):
        """Aho-Corasick automaton built from a list of keywords."""
        self.adlist = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state, char):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self):
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]['value'])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )

    def search_in(self, string):
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
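# Worked example for the Automaton above; offsets computed by hand from the
# reconstructed search_in logic.
auto = Automaton(["what", "hat", "ver", "er"])
print(auto.search_in("whatever, err ... , wherever"))
# {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}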
| 626
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100000:
        print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
        i *= 10
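    # Hedged sanity check: the signed integral of x^3 + x^2 over [-5, 5] is
    # 250/3 ~ 83.33, but the abs() in trapezoidal_area makes this an unsigned
    # area, which converges to roughly 312.67 as the step count grows.
    assert abs(trapezoidal_area(f, -5, 5, 100_000) - 312.67) < 0.5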
| 626
| 1
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
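# Hypothetical invocation of the converter above; every path is a placeholder.
# python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#     --pytorch_dump_folder_path ./transfo-xl-pt \
#     --tf_checkpoint_path ./model.ckpt \
#     --transfo_xl_config_file ./config.json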
| 175
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
if not conversation_id:
_UpperCAmelCase = uuid.uuid4()
if past_user_inputs is None:
_UpperCAmelCase = []
if generated_responses is None:
_UpperCAmelCase = []
_UpperCAmelCase = conversation_id
_UpperCAmelCase = past_user_inputs
_UpperCAmelCase = generated_responses
_UpperCAmelCase = text
def __eq__( self , _SCREAMING_SNAKE_CASE ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
_UpperCAmelCase = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
_UpperCAmelCase = text
def UpperCAmelCase ( self ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_UpperCAmelCase = None
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
self.generated_responses.append(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
_UpperCAmelCase = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
_UpperCAmelCase = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
__lowercase , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class _A ( __lowercase ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.tokenizer.pad_token_id is None:
_UpperCAmelCase = self.tokenizer.eos_token
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = {}
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if min_length_for_response is not None:
_UpperCAmelCase = min_length_for_response
if minimum_tokens is not None:
_UpperCAmelCase = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCAmelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_SCREAMING_SNAKE_CASE )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = super().__call__(_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=32 ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
_UpperCAmelCase = self.tokenizer._build_conversation_input_ids(_SCREAMING_SNAKE_CASE )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCAmelCase = self._legacy_parse_and_tokenize(_SCREAMING_SNAKE_CASE )
if self.framework == "pt":
_UpperCAmelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_UpperCAmelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=10 , **_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
_UpperCAmelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
_UpperCAmelCase = max_length - minimum_tokens
_UpperCAmelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCAmelCase = model_inputs["""attention_mask"""][:, -trim:]
_UpperCAmelCase = model_inputs.pop("""conversation""" )
_UpperCAmelCase = max_length
_UpperCAmelCase = self.model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.model.config.is_encoder_decoder:
_UpperCAmelCase = 1
else:
_UpperCAmelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
_UpperCAmelCase = model_outputs["""output_ids"""]
_UpperCAmelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(_SCREAMING_SNAKE_CASE )
return conversation
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.tokenizer.eos_token_id
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > self.tokenizer.model_max_length:
_UpperCAmelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
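# Hedged usage sketch for the conversational pipeline above; the model id is
# an illustrative assumption.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
conversation = Conversation("Got any movie suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])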
| 175
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the arguments for the TPU launch helper."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ), )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
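# Hypothetical launch of this helper; the script path and its flags are
# placeholders.
# python xla_spawn.py --num_cores 8 examples/run_glue.py --model_name_or_path bert-base-cased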
| 175
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = encoder_seq_length
__lowerCamelCase = decoder_seq_length
# For common tests
__lowerCamelCase = self.decoder_seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = d_ff
__lowerCamelCase = relative_attention_num_buckets
__lowerCamelCase = dropout_rate
__lowerCamelCase = initializer_factor
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = decoder_start_token_id
__lowerCamelCase = None
__lowerCamelCase = decoder_layers
def lowerCamelCase ( self ):
'''simple docstring'''
return TaConfig.from_pretrained('''google/umt5-base''' )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
'''simple docstring'''
if attention_mask is None:
__lowerCamelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__lowerCamelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
__lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
__lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = self.get_config()
__lowerCamelCase = config.num_attention_heads
__lowerCamelCase = self.prepare_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, input_dict
def lowerCamelCase ( self ):
'''simple docstring'''
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase ( self ):
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase ( self ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , )
__lowerCamelCase = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
__lowerCamelCase = result.last_hidden_state
__lowerCamelCase = result.past_key_values
__lowerCamelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval()
# first forward pass
__lowerCamelCase = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
__lowerCamelCase ,__lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = model(__UpperCAmelCase )['''last_hidden_state''']
__lowerCamelCase = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state''']
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval()
__lowerCamelCase = model(**__UpperCAmelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() )
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
model_split_percents = [0.8, 0.9]
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''' )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = config_and_inputs[0]
__lowerCamelCase = UMTaForConditionalGeneration(__UpperCAmelCase ).eval()
model.to(__UpperCAmelCase )
__lowerCamelCase = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ):
__lowerCamelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase )
__lowerCamelCase = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase )
__lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase )
__lowerCamelCase = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids
# fmt: off
__lowerCamelCase = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = model.generate(input_ids.to(__UpperCAmelCase ) )
__lowerCamelCase = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
| 175
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
SPIECE_UNDERLINE = '▁'
class __SCREAMING_SNAKE_CASE (lowercase_ ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =AlbertTokenizer
def __init__( self : List[Any] , __a : Any=None , __a : Optional[int]=None , __a : Any=True , __a : Dict=True , __a : Tuple=False , __a : List[Any]="[CLS]" , __a : Optional[Any]="[SEP]" , __a : Any="<unk>" , __a : Union[str, Any]="[SEP]" , __a : Optional[Any]="<pad>" , __a : List[str]="[CLS]" , __a : List[Any]="[MASK]" , **__a : Any , ):
_a = (
AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ , normalized=lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ )
else mask_token
)
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
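# A minimal usage sketch (added for illustration, not part of the original
# module). It assumes the `transformers` package is installed and that the
# "albert-base-v2" checkpoint referenced above is downloadable:
#
#     from transformers import AlbertTokenizerFast
#     tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     encoded = tokenizer("Hello world!")
#     print(encoded["input_ids"])  # ids with [CLS]/[SEP] already inserted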
| 713
|
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, t_max: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= t_max)
if __name__ == "__main__":
    print(f"{solution() = }")
| 521
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
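# A minimal usage sketch (illustrative, not part of the original module). It
# assumes `transformers` and PIL are installed and that the
# "Salesforce/blip-image-captioning-base" checkpoint is downloadable:
#
#     from PIL import Image
#     from transformers import BlipProcessor
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     image = Image.new("RGB", (384, 384))
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     print(inputs.keys())  # pixel_values plus the tokenized text fields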
| 2
|
from __future__ import annotations
def depth_first_search(possible_board: list[int], diagonal_right_collisions: list[int], diagonal_left_collisions: list[int], boards: list[list[str]], n: int, ) -> None:
    row = len(possible_board)
    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board), i.e. we found a complete solution.
    if row == n:
        # Convert possible_board, which looks like [1, 3, 0, 2], into its string
        # rendering: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # Iterate over every column in this row to find all valid placements.
    for col in range(n):
        # First check that the current board (possible_board) does not already
        # contain this column value; if it does, two queens would share a column.
        # Then apply the two diagonal formulas:
        #
        # 45º:  y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b
        #
        # and verify that neither result is already recorded in
        # diagonal_right_collisions / diagonal_left_collisions.
        #
        # If any of these checks is True there is a collision, so we continue
        # with the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Otherwise recurse with the updated partial board and collision sets.
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n_queens_solution(4)
| 651
| 0
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})
    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})
    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})
        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})
    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})
    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 718
|
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from PIL import Image
    from ..image_utils import load_image
if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
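# A minimal usage sketch (illustrative, not part of the original module). It
# assumes `transformers` is installed and the "openai/clip-vit-base-patch32"
# checkpoint is downloadable:
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     preds = classifier(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["a photo of a cat", "a photo of a dog"],
#     )
#     print(preds)  # list of {"score": ..., "label": ...}, best score first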
| 690
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"
    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
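# A minimal usage sketch (illustrative, not part of the original module),
# assuming `transformers` is installed; the config is built locally, so no
# checkpoint download is needed:
#
#     from transformers import RobertaConfig
#     config = RobertaConfig(num_hidden_layers=6)  # override any field above
#     print(config.hidden_size, config.num_hidden_layers)  # 768 6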
| 676
|
def gnome_sort(lst):
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # swap the out-of-order pair and step back
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 151
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    """Constructs a BLIP image processor."""
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
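# A minimal usage sketch (illustrative, not part of the original module). It
# assumes `transformers`, PIL and numpy are installed; the processor is built
# directly, so no checkpoint download is needed:
#
#     from PIL import Image
#     from transformers import BlipImageProcessor
#     processor = BlipImageProcessor()
#     image = Image.new("RGB", (640, 480))
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 384, 384)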
| 259
|
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Map HF Diffusers UNet keys back to the original stable-diffusion naming."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )
    args = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 259
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs, ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.")
| 171
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 171
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ) -> None:
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(GPTJOnnxConfig, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
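# A minimal usage sketch (illustrative, not part of the original module),
# assuming `transformers` is installed; it only builds config objects and
# inspects the declared ONNX inputs, so nothing is downloaded:
#
#     from transformers import GPTJConfig
#     from transformers.models.gptj.configuration_gptj import GPTJOnnxConfig
#     config = GPTJConfig()
#     onnx_config = GPTJOnnxConfig(config)
#     print(dict(onnx_config.inputs))  # {'input_ids': ..., 'attention_mask': ...}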
| 152
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """Constructs an EnCodec feature extractor."""
    model_input_names = ["input_values", "padding_mask"]
    def __init__(self, feature_size: int = 1, sampling_rate: int = 24_000, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs, ) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))))
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
        if padding:
            padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
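# A minimal usage sketch (illustrative, not part of the original module). It
# assumes `transformers` and numpy are installed; the extractor is built
# directly, so no checkpoint download is needed:
#
#     import numpy as np
#     from transformers import EncodecFeatureExtractor
#     extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
#     audio = np.zeros(24_000, dtype=np.float32)  # one second of silence
#     batch = extractor(raw_audio=audio, sampling_rate=24_000, return_tensors="np")
#     print(batch["input_values"].shape)  # (1, 1, 24000)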
| 152
| 1
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
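# Example invocation (paths, flags, and task name are illustrative, not taken
# from this script's argument definitions):
#
#   python run_pl_glue.py --model_name_or_path bert-base-cased --task mrpc \
#       --data_dir ./glue_data/MRPC --output_dir ./results --do_predict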
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Ascii85."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
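    # Round-trip sketch (illustrative): decoding inverts encoding for any UTF-8 string.
    assert base85_decode(base85_encode("some text")) == "some text"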
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
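    # Worked example (values are illustrative): borrowing 25,000 at an 8% annual
    # rate over 15 years means r = 0.08 / 12 per month across n = 180 payments.
    print(equated_monthly_installments(25000, 0.08, 15))  # ~238.92 per month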
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (deltas in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
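# Usage sketch (`model` and `batch` are placeholders, not part of this module):
#
#   start_measures = start_measure()
#   outputs = model(**batch)  # the workload being profiled
#   measures = end_measure(start_measures)
#   log_measures(measures, "Forward pass")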
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
"""Calculate the nth Proth number."""
import math


def proth(number: int) -> int:
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # After the first two values, Proth numbers can be generated block by
        # block: block n contributes 2 ** (n + 1) plus earlier list entries.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
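    # Quick sanity check (illustrative): the first Proth numbers k * 2**n + 1
    # (odd k < 2**n) are 3, 5, 9, 13, 17, 25, 33, ...
    assert [proth(n) for n in range(1, 8)] == [3, 5, 9, 13, 17, 25, 33]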
'''simple docstring'''
from manim import *
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
__SCREAMING_SNAKE_CASE : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE : Dict = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__SCREAMING_SNAKE_CASE : str = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__SCREAMING_SNAKE_CASE : List[str] = Text("""CPU""" , font_size=2_4 )
__SCREAMING_SNAKE_CASE : Any = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = [mem.copy() for i in range(4 )]
__SCREAMING_SNAKE_CASE : Union[str, Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__SCREAMING_SNAKE_CASE : Any = Text("""GPU""" , font_size=2_4 )
__SCREAMING_SNAKE_CASE : str = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__SCREAMING_SNAKE_CASE : str = Text("""Model""" , font_size=2_4 )
__SCREAMING_SNAKE_CASE : int = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for i, rect in enumerate(lowerCAmelCase__ ):
rect.set_stroke(lowerCAmelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCAmelCase__ , buff=0.0 )
self.add(lowerCAmelCase__ )
cpu_targs.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE : Dict = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__SCREAMING_SNAKE_CASE : Any = Text("""Loaded Checkpoint""" , font_size=2_4 )
__SCREAMING_SNAKE_CASE : Any = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , aligned_edge=lowerCAmelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__SCREAMING_SNAKE_CASE : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__SCREAMING_SNAKE_CASE : int = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__SCREAMING_SNAKE_CASE : Dict = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ) , Write(lowerCAmelCase__ ) )
self.play(Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) )
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : str = []
for i, rect in enumerate(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 )
target.move_to(lowerCAmelCase__ )
first_animations.append(GrowFromCenter(lowerCAmelCase__ , run_time=1 ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(*lowerCAmelCase__ )
self.wait()
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=lowerCAmelCase__ , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
__SCREAMING_SNAKE_CASE : str = controlnet_params
__SCREAMING_SNAKE_CASE : str = """bird"""
__SCREAMING_SNAKE_CASE : Optional[int] = jax.device_count()
__SCREAMING_SNAKE_CASE : str = pipe.prepare_text_inputs([prompts] * num_samples )
__SCREAMING_SNAKE_CASE : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
__SCREAMING_SNAKE_CASE : List[Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(lowerCAmelCase__ , jax.device_count() )
__SCREAMING_SNAKE_CASE : Optional[int] = replicate(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = pipe(
prompt_ids=lowerCAmelCase__ , image=lowerCAmelCase__ , params=lowerCAmelCase__ , prng_seed=lowerCAmelCase__ , num_inference_steps=5_0 , jit=lowerCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__SCREAMING_SNAKE_CASE : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=lowerCAmelCase__ , from_pt=lowerCAmelCase__ , dtype=jnp.bfloataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = controlnet_params
__SCREAMING_SNAKE_CASE : List[str] = """Chef in the kitchen"""
__SCREAMING_SNAKE_CASE : int = jax.device_count()
__SCREAMING_SNAKE_CASE : Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
__SCREAMING_SNAKE_CASE : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
__SCREAMING_SNAKE_CASE : Dict = pipe.prepare_image_inputs([pose_image] * num_samples )
__SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(lowerCAmelCase__ , jax.device_count() )
__SCREAMING_SNAKE_CASE : List[Any] = replicate(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = shard(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt_ids=lowerCAmelCase__ , image=lowerCAmelCase__ , params=lowerCAmelCase__ , prng_seed=lowerCAmelCase__ , num_inference_steps=5_0 , jit=lowerCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
__SCREAMING_SNAKE_CASE : Optional[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__SCREAMING_SNAKE_CASE : List[Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__SCREAMING_SNAKE_CASE : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
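# Minimal usage sketch (illustrative, not part of the original module): build a
# small config and inspect the ONNX input axes with past key values enabled.
#
#   config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)
#   onnx_config = GPTJOnnxConfig(config, use_past=True)
#   print(onnx_config.inputs)  # input_ids, past_key_values.*, attention_mask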
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order at the head so iteration yields ascending order
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
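    # Expected output: -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10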
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check the leading digits against known issuer prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print why a number is invalid, or confirm that it is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product attainable by a contiguous subarray."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
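

# Illustrative usage: a negative number swaps the running minimum and maximum
# products, which is why min_till_now is tracked alongside max_till_now.
#
#   max_product_subarray([2, 3, -2, 4])  # -> 6, from the subarray [2, 3]
#   max_product_subarray([-2, 0, -1])    # -> 0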
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sum the perimeters of every almost-equilateral triangle (two equal sides,
    the third differing by exactly one) with integral side lengths and area,
    whose perimeter does not exceed max_perimeter.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
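    # Quick check (illustrative): the only such triangles with perimeter <= 100
    # are (5, 5, 6) and (17, 17, 16), so solution(100) == 16 + 50 == 66.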
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : int ) -> str:
lowerCAmelCase_ : List[Any] = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
lowerCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCAmelCase_ : Any = {
"""do_resize""": True,
"""size""": {"""height""": 2_24, """width""": 2_24},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
lowerCAmelCase_ : Any = os.path.join(self.tmpdirname , lowerCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def __lowercase ( self : List[str] , **lowerCamelCase : Optional[int] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __lowercase ( self : Tuple , **lowerCamelCase : str ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __lowercase ( self : Union[str, Any] , **lowerCamelCase : Tuple ) -> str:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __lowercase ( self : Union[str, Any] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Tuple ) -> int:
lowerCAmelCase_ : Optional[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowerCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase_ : Any = self.get_tokenizer()
lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : List[Any] = self.get_image_processor()
lowerCAmelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
lowerCAmelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def __lowercase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase_ : Optional[int] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
lowerCAmelCase_ : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def __lowercase ( self : Optional[Any] ) -> Dict:
lowerCAmelCase_ : Tuple = self.get_image_processor()
lowerCAmelCase_ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase_ : Optional[int] = image_processor(lowerCamelCase , return_tensors="""np""" )
lowerCAmelCase_ : List[Any] = processor(images=lowerCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowercase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase_ : Dict = self.get_image_processor()
lowerCAmelCase_ : int = self.get_tokenizer()
lowerCAmelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : int = """Alexandra,T-shirt的价格是15便士。"""
lowerCAmelCase_ : Optional[Any] = processor(text=lowerCamelCase )
lowerCAmelCase_ : List[str] = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase_ : Dict = self.get_image_processor()
lowerCAmelCase_ : List[Any] = self.get_tokenizer()
lowerCAmelCase_ : Dict = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = """Alexandra,T-shirt的价格是15便士。"""
lowerCAmelCase_ : int = self.prepare_image_inputs()
lowerCAmelCase_ : Union[str, Any] = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def __lowercase ( self : int ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = self.get_image_processor()
lowerCAmelCase_ : int = self.get_tokenizer()
lowerCAmelCase_ : Dict = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ : List[Any] = processor.batch_decode(lowerCamelCase )
lowerCAmelCase_ : str = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def __lowercase ( self : str ) -> List[Any]:
lowerCAmelCase_ : Tuple = self.get_image_processor()
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : int = """Alexandra,T-shirt的价格是15便士。"""
lowerCAmelCase_ : str = self.prepare_image_inputs()
lowerCAmelCase_ : Tuple = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring"""
def _lowerCamelCase ( __a, __a ):
return int((input_a, input_a).count(0 ) == 0 )
def _lowerCamelCase ( ):
assert and_gate(0, 0 ) == 0
assert and_gate(0, 1 ) == 0
assert and_gate(1, 0 ) == 0
assert and_gate(1, 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class snake_case :
def __init__(self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [2, 1, 2, -1]
SCREAMING_SNAKE_CASE_ = [1, 2, 3, 4]
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = len(self.first_signal )
SCREAMING_SNAKE_CASE_ = len(self.second_signal )
SCREAMING_SNAKE_CASE_ = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# create a zero matrix of max_length x max_length
SCREAMING_SNAKE_CASE_ = [[0] * max_length for i in range(SCREAMING_SNAKE_CASE_ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = deque(self.second_signal )
rotated_signal.rotate(SCREAMING_SNAKE_CASE_ )
for j, item in enumerate(SCREAMING_SNAKE_CASE_ ):
matrix[i][j] += item
# multiply the matrix with the first signal
SCREAMING_SNAKE_CASE_ = np.matmul(np.transpose(SCREAMING_SNAKE_CASE_ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(SCREAMING_SNAKE_CASE_ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
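    # Illustrative demo with the default signals set in __init__:
    print(CircularConvolution().circular_convolution())  # [10.0, 10.0, 6.0, 14.0]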
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = "ylacombe/bark-small"
_SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE : List[str] = "en_speaker_1"
_SCREAMING_SNAKE_CASE : List[Any] = "This is a test string"
_SCREAMING_SNAKE_CASE : Optional[int] = "speaker_embeddings_path.json"
_SCREAMING_SNAKE_CASE : Union[str, Any] = "speaker_embeddings"
def UpperCamelCase_ ( self , **__lowerCamelCase ) -> str:
return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Dict = BarkProcessor(tokenizer=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE : Any = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_SCREAMING_SNAKE_CASE : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_SCREAMING_SNAKE_CASE : List[Any] = 3_5
_SCREAMING_SNAKE_CASE : Any = 2
_SCREAMING_SNAKE_CASE : int = 8
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"semantic_prompt": np.ones(__lowerCamelCase ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_SCREAMING_SNAKE_CASE : Optional[int] = processor(text=self.input_string , voice_preset=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , "file.npz" )
np.savez(__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = processor(text=self.input_string , voice_preset=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor(tokenizer=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = processor(text=self.input_string )
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(
self.input_string , padding="max_length" , max_length=2_5_6 , add_special_tokens=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """
    Return the sum of digits in the numerator of the max_n-th convergent of
    the continued fraction for e (Project Euler problem 65).
    """
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1

        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Optional[int]:
"""simple docstring"""
_lowercase: Optional[int] = parent
_lowercase: str = batch_size
_lowercase: Any = num_channels
_lowercase: Any = image_size
_lowercase: Any = min_resolution
_lowercase: int = max_resolution
_lowercase: Union[str, Any] = do_resize
_lowercase: List[Any] = size if size is not None else {'''height''': 18, '''width''': 20}
_lowercase: Optional[int] = do_thumbnail
_lowercase: List[str] = do_align_axis
_lowercase: int = do_pad
_lowercase: Optional[Any] = do_normalize
_lowercase: List[Any] = image_mean
_lowercase: Union[str, Any] = image_std
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __magic_name__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
UpperCamelCase_ = DonutImageProcessor if is_vision_available() else None
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: str = DonutImageProcessingTester(self )
@property
def lowercase_ ( self ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
self.assertTrue(hasattr(A_ , '''do_thumbnail''' ) )
self.assertTrue(hasattr(A_ , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(A_ , '''do_pad''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowercase: Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
_lowercase: List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
_lowercase: Any = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@is_flaky()
def lowercase_ ( self ) -> str:
"""simple docstring"""
_lowercase: str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowercase: Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowercase: List[Any] = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowercase_ ( self ) -> Any:
"""simple docstring"""
_lowercase: str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowercase: Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowercase: Dict = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowercase: Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowercase: Dict = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )
        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
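# Hedged usage sketch (added for illustration, not part of the original file):
# reading the derived properties from a default config. With the defaults above
# (24 kHz audio, upsampling ratios [8, 5, 4, 2]) the hop length is 320, so
# frame_rate = ceil(24000 / 320) = 75.
if __name__ == "__main__":
    demo_config = EncodecConfig()
    print(demo_config.frame_rate)  # 75
    print(demo_config.num_quantizers)  # int(1000 * 24.0 // (75 * 10)) = 32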
| 706
|
'''simple docstring'''
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
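# Hedged usage sketch (added for illustration, not part of the original file):
# DownloadConfig is a dataclass of download options consumed by the managers
# re-exported above; `cache_dir` and `force_download` are assumed field names.
if __name__ == "__main__":
    demo_config = DownloadConfig(cache_dir="/tmp/demo_cache", force_download=False)
    print(demo_config.cache_dir)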
| 270
| 0
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    '''simple docstring'''
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    '''simple docstring'''
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 36
|
'''simple docstring'''
def move_tower(height, from_pole, to_pole, with_pole):
    """simple docstring"""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """simple docstring"""
    print("moving disk from", fp, "to", tp)


def main():
    """simple docstring"""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
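# Worked example (added for illustration): move_tower(2, "A", "B", "C") prints
# the 2**2 - 1 = 3 moves that solve the two-disk puzzle:
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B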
| 378
| 0
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
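# Hedged sketch (added for illustration, not the library's exact rule): a
# simplified approximation of the invariant these tests exercise -- every
# component folder that ships a PyTorch ``.bin`` weight must also ship at
# least one ``.safetensors`` file. The function name is hypothetical.
def sketch_is_safetensors_compatible(filenames: list) -> bool:
    folders_with_bin = {name.split("/")[0] for name in filenames if name.endswith(".bin")}
    folders_with_safetensors = {name.split("/")[0] for name in filenames if name.endswith(".safetensors")}
    return folders_with_bin <= folders_with_safetensors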
| 589
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( lowercase ):
__lowercase : str = (DEISMultistepScheduler,)
__lowercase : Union[str, Any] = (("num_inference_steps", 25),)
def lowercase ( self , **lowerCamelCase_ ) -> Dict:
"""simple docstring"""
        config = {
"num_train_timesteps": 10_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**lowerCamelCase_ )
return config
def lowercase ( self , lowerCamelCase_=0 , **lowerCamelCase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = dict(self.forward_default_kwargs )
_UpperCamelCase = kwargs.pop("num_inference_steps" , lowerCamelCase_ )
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
_UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.get_scheduler_config(**lowerCamelCase_ )
_UpperCamelCase = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_UpperCamelCase = scheduler_class.from_pretrained(lowerCamelCase_ )
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCamelCase , _UpperCamelCase = sample, sample
for t in range(lowerCamelCase_ , time_step + scheduler.config.solver_order + 1 ):
_UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_UpperCamelCase = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
pass
def lowercase ( self , lowerCamelCase_=0 , **lowerCamelCase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase = dict(self.forward_default_kwargs )
_UpperCamelCase = kwargs.pop("num_inference_steps" , lowerCamelCase_ )
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
_UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_UpperCamelCase = scheduler_class.from_pretrained(lowerCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
_UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_UpperCamelCase = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase ( self , lowerCamelCase_=None , **lowerCamelCase_ ) -> Any:
"""simple docstring"""
if scheduler is None:
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(**lowerCamelCase_ )
_UpperCamelCase = scheduler_class(**lowerCamelCase_ )
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(**lowerCamelCase_ )
_UpperCamelCase = scheduler_class(**lowerCamelCase_ )
_UpperCamelCase = 10
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
return sample
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = dict(self.forward_default_kwargs )
_UpperCamelCase = kwargs.pop("num_inference_steps" , lowerCamelCase_ )
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowerCamelCase_ )
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase_ , "set_timesteps" ):
scheduler.set_timesteps(lowerCamelCase_ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase_ , "set_timesteps" ):
_UpperCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCamelCase = scheduler.timesteps[5]
_UpperCamelCase = scheduler.timesteps[6]
_UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_UpperCamelCase = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase ( self ) -> Any:
"""simple docstring"""
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )
def lowercase ( self ) -> Dict:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def lowercase ( self ) -> int:
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
_UpperCamelCase = self.full_loop(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers"
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def lowercase ( self ) -> Dict:
"""simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
def lowercase ( self ) -> List[str]:
"""simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
def lowercase ( self ) -> List[str]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
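# Hedged usage sketch (added for illustration): the solver swap exercised above.
# The multistep schedulers imported at the top share a config schema, so any one
# of them can be rebuilt from another's config at runtime.
def demo_swap_solver():
    deis = DEISMultistepScheduler(num_train_timesteps=1_000)
    return UniPCMultistepScheduler.from_config(deis.config)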
| 589
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
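# Hedged usage sketch (added for illustration, not part of the original file):
# `processor` is assumed to be an instance of the class above and `image` a PIL
# image; shapes depend on the image processor's size and the tokenizer's padding.
def demo_blip_processor(processor, image):
    inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
    return inputs["pixel_values"].shape, inputs["input_ids"].shape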
| 188
|
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    '''simple docstring'''
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    '''simple docstring'''
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    '''simple docstring'''
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    '''simple docstring'''
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key_fn = ignore_underscore(key)
    return sorted(constants, key=key_fn) + sorted(classes, key=key_fn) + sorted(functions, key=key_fn)
def sort_objects_in_import(import_statement: str) -> str:
    '''simple docstring'''
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"
    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    '''simple docstring'''
    with open(file, encoding='utf-8') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures.append(os.path.join(root, '__init__.py'))
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
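# Usage note (added for illustration; the file path is an assumption): run the
# script as a CI check or as an in-place fixer:
#   python utils/custom_init_isort.py --check_only
#   python utils/custom_init_isort.py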
| 188
| 1
|
import socket
def main() -> None:
    '''simple docstring'''
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312
    sock.connect((host, port))
    sock.send(b'Hello server!')
    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)
    print('Successfully received the file')
    sock.close()
    print('Connection closed')
if __name__ == "__main__":
main()
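# Hedged companion sketch (added for illustration, not part of the original
# file): a minimal server for the client above. It accepts one connection,
# prints the greeting, streams a file back, then closes the socket so the
# client's recv() loop sees EOF and stops. The file name is a stand-in.
def demo_file_server(file_name: str = "file_to_send.bin", port: int = 12_312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _ = server.accept()
    print(conn.recv(1_024))  # the client's greeting
    with open(file_name, "rb") as in_file:
        data = in_file.read(1_024)
        while data:
            conn.send(data)
            data = in_file.read(1_024)
    conn.close()  # EOF lets the client's loop terminate
    server.close()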
| 708
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    '''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
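# Worked example (added for illustration): "SKY" scores 19 + 11 + 25 = 55, and
# 55 = 0.5 * 10 * 11 is the 10th triangular number, so "SKY" is counted.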
| 639
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 150
|
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
    names = names.replace('"', '').split(',')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 355
| 0
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = 0
@slow
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase__ ) , 0 )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
# Check that tokenizer_type ≠ model_type
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase__ , """vocab.txt""" ) )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""bert""" , use_fast=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase__ , """merges.txt""" ) )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""gpt2""" , use_fast=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@require_tokenizers
def snake_case_ ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase__ , """vocab.txt""" ) )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""bert""" )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase__ , """merges.txt""" ) )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""gpt2""" )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
with pytest.raises(UpperCamelCase__ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase__ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase__ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = TOKENIZER_MAPPING.values()
A_ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase__ )
@require_tokenizers
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , UpperCamelCase__ )
@require_tokenizers
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=UpperCamelCase__ )
A_ = """Hello, world. How are you?"""
A_ = tokenizer.tokenize(UpperCamelCase__ )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=UpperCamelCase__ )
A_ = tokenizer.tokenize(UpperCamelCase__ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = get_tokenizer_config("""bert-base-cased""" )
A_ = config.pop("""_commit_hash""" , UpperCamelCase__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase__ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ = get_tokenizer_config(UpperCamelCase__ )
self.assertDictEqual(UpperCamelCase__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = get_tokenizer_config(UpperCamelCase__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def snake_case_ ( self ) -> str:
'''simple docstring'''
try:
AutoConfig.register("""custom""" , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
A_ = CustomTokenizer.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
try:
AutoConfig.register("""custom""" , UpperCamelCase__ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = BertTokenizerFast.from_pretrained(UpperCamelCase__ )
bert_tokenizer.save_pretrained(UpperCamelCase__ )
A_ = CustomTokenizerFast.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def snake_case_ ( self ) -> Any:
'''simple docstring'''
class A__ ( _snake_case ):
lowercase = False
class A__ ( _snake_case ):
lowercase = NewTokenizer
lowercase = False
try:
AutoConfig.register("""custom""" , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
# If remote code is not set, the default is to use local
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ = AutoTokenizer.from_pretrained("""bert-base""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , revision="""aaaaaa""" )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
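# Hedged recap sketch (added for illustration): the registration dance the
# tests above exercise, using the custom classes imported at the top of the file.
def demo_register_custom_tokenizer():
    AutoConfig.register("custom", CustomConfig)
    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    return TOKENIZER_MAPPING[CustomConfig]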
| 717
|
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative() -> None:
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()

def test_change_contrast() -> None:
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as pil_img:
        # Work around assertion for response
        assert str(cc.change_contrast(pil_img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")

def test_gen_gaussian_kernel() -> None:
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()

def test_canny() -> None:
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()

def test_gen_gaussian_kernel_filter() -> None:
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()

def test_convolve_filter() -> None:
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()

def test_median_filter() -> None:
    assert med.median_filter(gray, 3).any()

def test_sobel_filter() -> None:
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()

def test_sepia() -> None:
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()

def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()

def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()

def test_local_binary_pattern() -> None:
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """simple docstring"""
        raise NotImplementedError()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True , *args , **kwargs ):
    '''simple docstring'''
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
    disable = False
    if main_process_only:
        # only render the bar on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
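
if __name__ == "__main__":
    # Minimal demo (added for illustration, not part of the original module).
    # Note that the first positional argument of this wrapper is
    # `main_process_only`; the iterable comes after it.
    for _ in tqdm(True, range(3)):
        pass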
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor ):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__( self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T

    def _np_extract_fbank_features( self, waveform ):
        '''simple docstring'''
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
        return log_spec
    def __call__( self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs, ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float32 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list ):
            audio_features = [np.asarray(feature, dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors )
        return encoded_inputs
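
if __name__ == "__main__":
    # Minimal smoke test (added for illustration, not part of the original
    # module): one second of low-amplitude noise at the expected 44.1 kHz rate.
    extractor = TvltFeatureExtractor()
    audio = (np.random.randn(44100) * 0.01).astype(np.float32)
    out = extractor(audio, sampling_rate=44100, return_attention_mask=True)
    print(out["audio_values"].shape, out["audio_mask"].shape)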
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_bigcode"""] = [
        """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTBigCodeForSequenceClassification""",
        """GPTBigCodeForTokenClassification""",
        """GPTBigCodeForCausalLM""",
        """GPTBigCodeModel""",
        """GPTBigCodePreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def decimal_to_binary(num )-> str:
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
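
if __name__ == "__main__":
    # Quick checks (added for illustration): the prefix mirrors Python's
    # built-in 0b literal notation, with a leading "-" for negatives.
    assert decimal_to_binary(0) == "0b0"
    assert decimal_to_binary(10) == "0b1010"
    assert decimal_to_binary(-10) == "-0b1010"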
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''.''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 1.0,
            '''sag_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )


@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )

        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2

    def test_stable_diffusion_2( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )

        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2

    def test_stable_diffusion_2_non_square( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )

        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token

        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id

    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )

        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        return (out_vocab_file,)
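
if __name__ == "__main__":
    # Illustrative usage (added; downloads the pretrained tokenizer files on
    # first run). Unlike BERT-style tokenizers, XLNet appends its special
    # tokens at the END of the sequence.
    tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    print(tok.build_inputs_with_special_tokens([10, 11]))  # [10, 11, <sep>, <cls>]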
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum ):
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin ):
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('''gpt2''' )
        self.wp_tokenizer = AutoTokenizer.from_pretrained('''bert-base-uncased''' )

        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )

        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , sequences ):
        '''simple docstring'''
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )

        char_strs, char_scores = self._decode_helper(char_preds , '''char''' )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds , '''bpe''' )
        wp_strs, wp_scores = self._decode_helper(wp_preds , '''wp''' )

        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )

        out = {}
        out['''generated_text'''] = final_strs
        out['''scores'''] = final_scores
        out['''char_preds'''] = char_strs
        out['''bpe_preds'''] = bpe_strs
        out['''wp_preds'''] = wp_strs
        return out

    def _decode_helper( self , pred_logits , format ):
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char "[s]" token id
            eos_str = '''[s]'''
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe "#" token id
            eos_str = '''#'''
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wordpiece "[SEP]" token id
            eos_str = '''[SEP]'''
        else:
            raise ValueError(f"""Format {format} is not supported.""" )

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _, preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )

        return dec_strs, conf_scores

    def char_decode( self , sequences ):
        '''simple docstring'''
        decode_strs = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs

    def bpe_decode( self , sequences ):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences )

    def wp_decode( self , sequences ):
        '''simple docstring'''
        decode_strs = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
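
# Illustrative usage (added, not part of the original module). Given the three
# logit tensors produced by an MGP-STR model, `batch_decode` keeps the
# best-scoring decode per sample; the checkpoint name below is an assumption.
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   char_logits, bpe_logits, wp_logits = model(pixel_values).logits
#   results = processor.batch_decode((char_logits, bpe_logits, wp_logits))
#   print(results["generated_text"], results["scores"])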
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowerCamelCase_ = TypeVar('''T''')
lowerCamelCase_ = Union[List[T], Tuple[T, ...]]
lowerCamelCase_ = Union[T, List[T], Dict[str, T]]
lowerCamelCase_ = Union[str, bytes, os.PathLike]
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    '''simple docstring'''

    _keys_to_ignore_on_load_unexpected = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']

    @register_to_config
    def __init__( self , prefix_length: int , prefix_inner_dim: int , prefix_hidden_dim: Optional[int] = None , vocab_size: int = 50_257 , n_positions: int = 1_024 , n_embd: int = 768 , n_layer: int = 12 , n_head: int = 12 , n_inner: Optional[int] = None , activation_function: str = "gelu_new" , resid_pdrop: float = 0.1 , embd_pdrop: float = 0.1 , attn_pdrop: float = 0.1 , layer_norm_epsilon: float = 1e-5 , initializer_range: float = 0.02 , scale_attn_weights: bool = True , use_cache: bool = True , scale_attn_by_inverse_layer_idx: bool = False , reorder_and_upcast_attn: bool = False , ):
        """simple docstring"""
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                F" `n_embd`: {n_embd} are not equal." )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPT2LMHeadModel(gpt_config )
    def forward( self , input_ids , prefix_embeds , attention_mask=None , labels=None , ):
        """simple docstring"""
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token( self , batch_size , device ):
        """simple docstring"""
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )

    def encode( self , prefix ):
        """simple docstring"""
        return self.encode_prefix(prefix )
    @torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ):
        """simple docstring"""
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam( self , input_embeds=None , device=None , input_ids=None , beam_size: int = 5 , entry_length: int = 67 , temperature: float = 1.0 , eos_token_id: Optional[int] = None , ):
        """simple docstring"""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )

        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens, scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
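
if __name__ == "__main__":
    # Smoke test (added for illustration, not part of the original module):
    # a tiny decoder fed a single random "prefix" feature; eos_token_id=0 and
    # the small hyperparameters are arbitrary choices for a quick run.
    decoder = UniDiffuserTextDecoder(
        prefix_length=4, prefix_inner_dim=32, prefix_hidden_dim=32, vocab_size=100, n_positions=128, n_embd=64, n_layer=2, n_head=2 )
    features = torch.randn(1, 4, 32)
    tokens, lengths = decoder.generate_captions(features, eos_token_id=0, device="cpu")
    print(tokens.shape, lengths)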
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin ):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__( self , image_processor , tokenizer ):
        """simple docstring"""
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """simple docstring"""
        return ["input_ids", "attention_mask", "pixel_values"]
def get_set_bits_count(number: int ) -> int:
    """simple docstring"""
    if number < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(number , float ):
        raise TypeError("Input value must be a 'int' type" )
    return bin(number ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
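
if __name__ == "__main__":
    # Quick check (added): 25 = 0b11001 has three set bits.
    assert get_set_bits_count(25) == 3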
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__( self ):
        """simple docstring"""
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch( self , input_image ):
        """simple docstring"""
        self.img = cv2.imread(input_image , 0 )
        self.original_image = copy.deepcopy(self.img )
        x, _, _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
        self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg" , self.img )

    def plot_histogram( self ):
        """simple docstring"""
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def show_image( self ):
        """simple docstring"""
        cv2.imshow("Output-Image" , self.img )
        cv2.imshow("Input-Image" , self.original_image )
        cv2.waitKey(5_000 )
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = """timesformer"""

    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
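
if __name__ == "__main__":
    # Illustrative usage (added): build a config for a longer clip and inspect
    # a couple of the derived attributes.
    config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
    print(config.hidden_size, config.num_frames)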
import unittest
import numpy as np
def schur_complement(mat_a , mat_b , mat_c , pseudo_inv=None , ) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )

    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            F"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg )

    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            F"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" )

    return mat_c - mat_b.T @ a_inv @ mat_b
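
if __name__ == "__main__":
    # Worked example (added): for M = [[A, B], [B.T, C]] the identity
    # det(M) = det(A) * det(C - B.T @ A^{-1} @ B) holds. Here the complement is
    # 3 - [2, 2] @ inv(4 * I) @ [2, 2].T = 3 - 2 = 1.
    a = np.array([[4.0, 0.0], [0.0, 4.0]])
    b = np.array([[2.0], [2.0]])
    c = np.array([[3.0]])
    print(schur_complement(a, b, c))  # [[1.]]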
class TestSchurComplement(unittest.TestCase ):
    """simple docstring"""

    def test_schur_complement( self ) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )

        input_matrix = np.block([[a, b], [b.T, c]] )

        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )

        self.assertAlmostEqual(det_x , det_a * det_s )

    def test_improper_a_b_dimensions( self ) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )

        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )

    def test_improper_b_c_dimensions( self ) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )

        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase ):
    def analyze_directory( self , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ):
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier , (list, tuple) ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("""Testing""" , file )

            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )

    def test_modeling_doctests( self ):
        directory = Path("""src/transformers""" )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )

    def test_tokenization_doctests( self ):
        directory = Path("""src/transformers""" )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )

    def test_configuration_doctests( self ):
        directory = Path("""src/transformers""" )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )

    def test_remaining_doctests( self ):
        directory = Path("""src/transformers""" )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )

    def test_doc_sources( self ):
        directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict( self ):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp( self ):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self )

    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
    def test_call( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors="""np""" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors="""np""" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors="""np""" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors="""np""" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )

    def test_zero_mean_unit_variance_normalization_np( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]

        paddings = ["""longest""", """max_length""", """do_not_pad"""]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , padding=padding , max_length=max_length , return_tensors="""np""" )
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def test_zero_mean_unit_variance_normalization( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lengths = range(800 , 1400 , 200 )
        speech_inputs = [floats_list((1, x) )[0] for x in lengths]

        paddings = ["""longest""", """max_length""", """do_not_pad"""]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )

        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )

    @require_torch
    def test_double_precision_pad( self ):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly( self ):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id )
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id )

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
def valid_coloring(neighbours , colored_vertices , color ) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )


def util_color(graph , max_colors , colored_vertices , index ) -> bool:
    # Base Case
    if index == len(graph ):
        return True

    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph , max_colors ) -> list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
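
if __name__ == "__main__":
    # Illustrative usage (added): 3-coloring a 5-cycle given as an adjacency
    # matrix; `color` returns one valid assignment, or [] if none exists.
    graph = [
        [0, 1, 0, 0, 1],
        [1, 0, 1, 0, 0],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [1, 0, 0, 1, 0],
    ]
    print(color(graph, 3))  # e.g. [0, 1, 0, 1, 2]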
from math import log2


def get_index_of_rightmost_set_bit(number: int ) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(number , float ):
        raise TypeError("Input value must be a 'int' type" )
    return 0 if (number == 0) else int(log2(number & -number ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
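
if __name__ == "__main__":
    # Quick check (added): 36 = 0b100100, so the lowest set bit is at index 2
    # (log2(36 & -36) = log2(4) = 2).
    assert get_index_of_rightmost_set_bit(36) == 2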
def encrypt(input_string: str , key: int ) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1 or len(input_string ) <= key:
        return input_string

    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["".join(row ) for row in temp_grid]
    output_string = "".join(grid )

    return output_string


def decrypt(input_string: str , key: int ) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append("*" )

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )

    output_string = ""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string


def bruteforce(input_string: str ) -> dict[int, str]:
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
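
if __name__ == "__main__":
    # Illustrative usage (added): encrypt/decrypt round-trip, plus recovering
    # the plaintext by brute-forcing the key.
    plaintext = "WE ARE DISCOVERED. FLEE AT ONCE"
    ciphertext = encrypt(plaintext, 3)
    assert decrypt(ciphertext, 3) == plaintext
    assert bruteforce(ciphertext)[3] == plaintext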
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # The tester deliberately hardcodes its values; the arguments above are
        # kept only for API compatibility with the other model testers.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
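
    # Why only a 3x3 slice: pinning every logit is brittle across hardware and
    # TF versions, so integration tests of this kind compare just the first
    # tokens/dims against reference values with a loose tolerance; a real
    # regression in the conv-attention kernels still moves these numbers well
    # past atol=1e-4.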
| 709
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
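
# Example of the dispatch above (a sketch, using a made-up checkpoint entry):
# for key = "wav2vec2_conformer.encoder.layers.0.self_attn.linear_q" and
# weight_type = "weight", the getattr loop walks the HF model attribute by
# attribute down to that linear layer, verifies the shape, and copies `value`
# into its `.weight.data`.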
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
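
# Mapping note (a sketch): a fairseq name such as "encoder.layers.3.ffn1.w_1"
# matches the MAPPING key "ffn1.w_1"; the layer index "3" is recovered via
# name.split(key)[0].split(".")[-2] and substituted for "*", giving
# "wav2vec2_conformer.encoder.layers.3.ffn1.intermediate_dense" as the target.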
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
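
# Example (a sketch): "conv_layers.0.0.weight" parses to layer_id=0 and
# type_id=0, so the branch above targets conv_layers[0].conv.weight; a
# type_id of 2 targets the layer-norm parameters instead (only valid for
# layer 0 when group norm is used).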
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
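
# Invocation sketch (the script filename and paths are placeholders; the
# flags are exactly those declared above):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path ./conformer_ckpt.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf --not_finetuned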
| 253
| 0
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''')
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''')
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
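
# Worked example (ideal gas law PV = nRT): 1.0 mol at 300 K in a 0.0245 m^3
# vessel exerts P = 1.0 * 300 * 8.314462 / 0.0245 ~= 1.018e5 Pa, roughly one
# atmosphere; feeding that pressure back into the volume helper recovers
# ~0.0245 m^3.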
if __name__ == "__main__":
from doctest import testmod
testmod()
| 11
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 108
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
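
# Note: the three merged task mappings above share the encoder/decoder tables
# and differ only in which prenet/postnet tables are mixed in (speech vs. text
# on each side), mirroring the s2t / t2s / s2s branches handled below.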
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return True if `name` matches one of the ignore patterns."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
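
# Matching rules by example (a sketch): the ignore key "text_encoder_prenet.*"
# skips "text_encoder_prenet.encoder_prenet.0" via the prefix branch; a key
# like "encoder.layers.*.norm_k.weight" uses the prefix-and-suffix branch; a
# bare key such as "encoder.version" only needs to occur in the name.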
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        feature_extractor = SpeechTaFeatureExtractor()
        processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
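
# Invocation sketch (script filename and paths are placeholders; flags as
# declared below):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf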
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 703
|
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the first index at which `item` can be inserted while keeping the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the last index at which `item` can be inserted while keeping the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` before any existing run of equal values."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` after any existing run of equal values."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of `item` or None if absent."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """The same search delegated to the standard-library `bisect` module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left : right + 1]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
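
# Quick contrast of the two bisection variants (a verified sketch): for
# [0, 5, 5, 10], bisect_left(..., 5) == 1 while bisect_right(..., 5) == 3,
# so insort_left places a duplicate before the existing run of equal items
# and insort_right places it after; both keep the list sorted.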
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 13
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
            inputs_dict["labels"] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
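# Illustrative sketch (not part of the original test module): the greedy-decoding
# check above as a standalone snippet. Assumes the public "openai-gpt" checkpoint
# and its tokenizer can be downloaded.
#
# from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#
# tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
# model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
# input_ids = tokenizer("the president is", return_tensors="pt").input_ids
# output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
# print(tokenizer.decode(output_ids[0]))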
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
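# Illustrative usage (a minimal sketch, not from the original file): instantiating
# the tool downloads the `philschmid/bart-large-cnn-samsum` checkpoint, and calling
# it runs encode -> forward -> decode in sequence.
#
# summarizer = TextSummarizationTool()
# summary = summarizer("Long English text to condense into a few sentences...")
# print(summary)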
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
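# Illustrative usage (a minimal sketch, not part of the original module): building
# dummy inputs for ONNX export. Assumes `facebook/bart-large` can be downloaded.
#
# from transformers import AutoTokenizer, BartConfig
#
# config = BartConfig.from_pretrained("facebook/bart-large")
# onnx_config = BartOnnxConfig(config, task="seq2seq-lm")
# tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
# dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
# print(sorted(dummy.keys()))  # input_ids, attention_mask, decoder_* ...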
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
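# Illustrative usage (a sketch, not from the original file): aligning the template
# with a dataset whose label feature is a concrete ClassLabel.
#
# features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
# template = AudioClassification(audio_column="audio", label_column="labels")
# aligned = template.align_with_features(features)
# print(aligned.label_schema["labels"].names)  # ['dog', 'cat']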
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
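# Migration sketch (illustrative, not part of the original file): code that used
# the deprecated class can switch to FlavaImageProcessor with no other changes,
# since FlavaFeatureExtractor is now only a deprecation shim around it. Assumes
# the "facebook/flava-full" checkpoint is available.
#
# from transformers import FlavaImageProcessor
#
# processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")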
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_dataset_infos_dict_from_directory(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0",
        features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(),
        task_templates=[], builder_name="builder", config_name="config", version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337,
        post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
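# Quick round-trip sketch (illustrative; the output directory below is hypothetical):
# DatasetInfosDict serializes its YAML-safe fields into README.md front matter.
#
# infos = DatasetInfosDict({"default": DatasetInfo(dataset_size=42)})
# infos.write_to_directory("/tmp/my_dataset")
# print(DatasetInfosDict.from_directory("/tmp/my_dataset")["default"].dataset_size)  # 42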
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
lowerCamelCase__ = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
lowerCamelCase__ = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
            webbrowser.open(f"https://google.com{link.get('href')}")
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator=None,
        latents=None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            rendered_image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(rendered_image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
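# Illustrative usage (a sketch, not from the original module): composing a full
# AltCLIP configuration from its text and vision halves.
#
# text_config = AltCLIPTextConfig()
# vision_config = AltCLIPVisionConfig()
# config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
# print(config.projection_dim)  # 768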
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the position of the highest set bit of `number`, i.e. its bit length.
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
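# Worked example (illustrative): 0b1011_0000 has its highest set bit at position 8,
# which matches int.bit_length() for non-negative integers.
#
# assert get_highest_set_bit_position(0b1011_0000) == 8
# assert get_highest_set_bit_position(0) == 0
# assert get_highest_set_bit_position(177) == (177).bit_length()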
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Given a number, returns its twin prime (number + 2) if both are prime,
    and -1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
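# Quick examples (illustrative): 5 and 7 are twin primes; 8 is not prime at all.
#
# assert twin_prime(5) == 7
# assert twin_prime(8) == -1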
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
        T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """
    Capitalizes the first letter of a sentence or word.
    """
    if not sentence:
        return ""

    # Map each lowercase letter to its uppercase counterpart
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
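# Quick examples (illustrative):
#
# assert capitalize("hello world") == "Hello world"
# assert capitalize("123 hello") == "123 hello"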
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__( self , max_length , vocab_size , d_model , dropout_rate , num_layers , num_heads , d_kv , d_ff , feed_forward_proj , is_decoder = False , ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = T5Config(
            vocab_size=d_model if False else vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )
        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
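# Minimal usage sketch (hypothetical hyperparameters, not taken from a released
# config):
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 1536, (1, 2048))
#   mask = torch.ones(1, 2048, dtype=torch.bool)
#   encoded, mask = encoder(tokens, mask)  # encoded: (1, 2048, 768)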
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions')
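# Sanity check: for max_perimeter=12 the only integer right triangle is
# (3, 4, 5), so pythagorean_triple(12) == Counter({12: 1}) and solution(12) == 12.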
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''PoolFormerConfig'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''sail/poolformer_s12'''
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''sail/poolformer_s12'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''sail/poolformer_s12''',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input , drop_prob = 0.0 , training = False ):
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
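# Example (sketch): with drop_prob=0.5 in training mode, roughly half of the
# samples in a batch have this branch zeroed while the rest are scaled by
# 1/keep_prob = 2.0, keeping the expected value unchanged:
#   x = torch.ones(8, 16, 4, 4)
#   y = drop_path(x, drop_prob=0.5, training=True)  # per-sample values are 0.0 or 2.0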
class PoolFormerDropPath(nn.Module ):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    def __init__( self , drop_prob = None ) -> None:
        super().__init__()
        self.drop_prob = drop_prob
    def forward( self , hidden_states ) -> torch.Tensor:
        return drop_path(hidden_states , self.drop_prob , self.training )
    def extra_repr( self ) -> str:
        return "p={}".format(self.drop_prob )
class PoolFormerEmbeddings(nn.Module ):
    """Construct patch embeddings."""
    def __init__( self , hidden_size , num_channels , patch_size , stride , padding , norm_layer=None ) -> None:
        super().__init__()
        patch_size = patch_size if isinstance(patch_size , collections.abc.Iterable ) else (patch_size, patch_size)
        stride = stride if isinstance(stride , collections.abc.Iterable ) else (stride, stride)
        padding = padding if isinstance(padding , collections.abc.Iterable ) else (padding, padding)
        self.projection = nn.Conv2d(num_channels , hidden_size , kernel_size=patch_size , stride=stride , padding=padding )
        self.norm = norm_layer(hidden_size ) if norm_layer else nn.Identity()
    def forward( self , pixel_values ):
        embeddings = self.projection(pixel_values )
        embeddings = self.norm(embeddings )
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm ):
    """Group normalization with a single group; input is a [batch, channels, height, width] tensor."""
    def __init__( self , num_channels , **kwargs ):
        super().__init__(1 , num_channels , **kwargs )
class PoolFormerPooling(nn.Module ):
    def __init__( self , pool_size ):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size , stride=1 , padding=pool_size // 2 , count_include_pad=False )
    def forward( self , hidden_states ):
        return self.pool(hidden_states ) - hidden_states
class PoolFormerOutput(nn.Module ):
    def __init__( self , config , dropout_prob , hidden_size , intermediate_size ):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size , intermediate_size , 1 )
        self.conv2 = nn.Conv2d(intermediate_size , hidden_size , 1 )
        self.drop = PoolFormerDropPath(dropout_prob )
        if isinstance(config.hidden_act , str ):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act
    def forward( self , hidden_states ):
        hidden_states = self.conv1(hidden_states )
        hidden_states = self.act_fn(hidden_states )
        hidden_states = self.drop(hidden_states )
        hidden_states = self.conv2(hidden_states )
        hidden_states = self.drop(hidden_states )
        return hidden_states
class PoolFormerLayer(nn.Module ):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""
    def __init__( self , config , num_channels , pool_size , hidden_size , intermediate_size , drop_path ):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size )
        self.output = PoolFormerOutput(config , drop_path , hidden_size , intermediate_size )
        self.before_norm = PoolFormerGroupNorm(num_channels )
        self.after_norm = PoolFormerGroupNorm(num_channels )
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path ) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
    def forward( self , hidden_states ):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states ) )
            scaled_op = self.layer_scale_1.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op )
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states ) )
            scaled_op = self.layer_scale_2.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op )
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states ) ) )
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states ) ) )
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        self.patch_embeddings = nn.ModuleList(embeddings )
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        config , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(layers ) )
        self.block = nn.ModuleList(blocks )
    def forward( self , pixel_values , output_hidden_states=False , return_dict=True ):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            embedding_layer , block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states )
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer ):
                layer_outputs = blk(hidden_states )
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states , hidden_states=all_hidden_states )
class PoolFormerPreTrainedModel(PreTrainedModel ):
    """An abstract class to handle weights initialization and pretrained model loading."""
    config_class = PoolFormerConfig
    base_model_prefix = '''poolformer'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , PoolFormerEncoder ):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
POOLFORMER_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
    '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , POOLFORMER_START_DOCSTRING , )
class PoolFormerModel(PoolFormerPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.encoder = PoolFormerEncoder(config )
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings( self ):
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )
        encoder_outputs = self.encoder(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler(nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size , config.hidden_size )
    def forward( self , hidden_states ):
        output = self.dense(hidden_states )
        return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
    ''' , POOLFORMER_START_DOCSTRING , )
class PoolFormerForImageClassification(PoolFormerPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config )
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output ).mean([-2, -1] ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
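# Usage sketch (assumes the standard transformers image-classification API):
#   from transformers import AutoImageProcessor
#   from PIL import Image
#   image_processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = image_processor(images=Image.open("cat.png"), return_tensors="pt")
#   logits = model(**inputs).logits
#   predicted = model.config.id2label[logits.argmax(-1).item()]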
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''snap-research/efficientformer-l1-300''': (
        '''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
    ),
}
class EfficientFormerConfig(PretrainedConfig ):
    model_type = '''efficientformer'''
    def __init__( self , depths = [3, 2, 6, 4] , hidden_sizes = [48, 96, 224, 448] , downsamples = [True, True, True, True] , dim = 448 , key_dim = 32 , attention_ratio = 4 , resolution = 7 , num_hidden_layers = 5 , num_attention_heads = 8 , mlp_expansion_ratio = 4 , hidden_dropout_prob = 0.0 , patch_size = 16 , num_channels = 3 , pool_size = 3 , downsample_patch_size = 3 , downsample_stride = 2 , downsample_pad = 1 , drop_path_rate = 0.0 , num_meta3d_blocks = 1 , distillation = True , use_layer_scale = True , layer_scale_init_value = 1e-5 , hidden_act = "gelu" , initializer_range = 0.02 , layer_norm_eps = 1e-12 , image_size = 224 , batch_norm_eps = 1e-05 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
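# Example (sketch): the defaults above describe an EfficientFormer-L1-style
# layout; individual fields can be overridden at construction time, and unknown
# kwargs are forwarded to PretrainedConfig:
#   config = EfficientFormerConfig(image_size=192, drop_path_rate=0.1)
#   config.hidden_sizes  # [48, 96, 224, 448]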
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module ):
        def __init__( self , tokenizer ):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPT2LMHeadModel.from_config(config )
        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
        def serving( self , text ):
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.int32 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['logits']
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase ):
    def setUp( self ):
'''simple docstring'''
super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
@slow
    def test_graph_mode( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model , save_path , signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
    def test_from_config( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
    def test_padding( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 12_31_23
for max_length in [3, 5, 10_24]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out['input_ids'].numpy().shape[1]
assert out_length == max_length
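# Sketch: because TFGPT2Tokenizer runs in-graph, a SavedModel built with
# ModelToSave above takes raw strings end to end:
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   model = ModelToSave(tokenizer=tf_tokenizer)
#   logits = model.serving(tf.constant(["Hello there"]))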
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('name_or_path' )
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;'
                ' if you are testing the model, this can safely be ignored' )
            name_or_path = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '<|endoftext|>' if eos_token is None else eos_token
        unk_token = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '<pad>' if pad_token is None else pad_token
            bos_token = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]' )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.sp_model )
    def preprocess_text( self , text ):
        text = self.non_printing_characters_re.sub('' , text )
        # Normalize whitespaces
        text = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('NFC' , text )
        return text
    def _tokenize( self , text , **kwargs ):
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
@staticmethod
    def clean_up_tokenization( out_string ):
        # Return the input string unchanged; overridden to disable the default clean-up.
        return out_string
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast( self , text , return_tensors = False ):
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self , token_ids ):
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids( self , conversation ):
        all_responses = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(all_responses ) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt )
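# Sketch of the chat prompt built by _build_conversation_input_ids above
# (assuming the default eos="<|endoftext|>" and bos="<s>"): a conversation
# [user: "Hi", bot: "Hello"] is flattened to
#   "<|endoftext|><s>User: Hi<s>Bot: Hello<s>Bot:"
# and then encoded with the SentencePiece model.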
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module ):
    def __init__( self ):
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm2d(4 )
        self.lineara = nn.Linear(4 , 5 )
    def forward( self , lowercase ):
        return self.lineara(self.batchnorm(self.lineara(lowercase ) ) )
class PreForwardHook(ModelHook ):
    def pre_forward( self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook ):
    def post_forward( self , module , output ):
        return output + 1
class HooksModelTester(unittest.TestCase ):
    def test_add_and_remove_hooks( self ):
_lowerCamelCase : int = ModelForTest()
_lowerCamelCase : List[str] = ModelHook()
add_hook_to_module(lowercase , lowercase )
self.assertEqual(test_model._hf_hook , lowercase )
self.assertTrue(hasattr(lowercase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowercase )
self.assertFalse(hasattr(lowercase , '_hf_hook' ) )
self.assertFalse(hasattr(lowercase , '_old_forward' ) )
    def test_append_and_remove_hooks( self ):
_lowerCamelCase : List[Any] = ModelForTest()
_lowerCamelCase : int = ModelHook()
add_hook_to_module(lowercase , lowercase )
add_hook_to_module(lowercase , lowercase , append=lowercase )
self.assertEqual(isinstance(test_model._hf_hook , lowercase ) , lowercase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(lowercase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowercase )
self.assertFalse(hasattr(lowercase , '_hf_hook' ) )
self.assertFalse(hasattr(lowercase , '_old_forward' ) )
    def test_pre_forward_hook_is_executed( self ):
_lowerCamelCase : Dict = ModelForTest()
_lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
_lowerCamelCase : Union[str, Any] = test_model(x + 1 )
_lowerCamelCase : str = test_model(x + 2 )
_lowerCamelCase : str = PreForwardHook()
add_hook_to_module(lowercase , lowercase )
_lowerCamelCase : Dict = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_lowerCamelCase : Union[str, Any] = PreForwardHook()
add_hook_to_module(lowercase , lowercase )
_lowerCamelCase : Optional[Any] = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCamelCase : Union[str, Any] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(lowercase , lowercase )
_lowerCamelCase : Any = test_model(lowercase )
assert torch.allclose(lowercase , lowercase , atol=1E-5 )
    def test_post_forward_hook_is_executed( self ):
_lowerCamelCase : Union[str, Any] = ModelForTest()
_lowerCamelCase : Union[str, Any] = torch.randn(2 , 3 )
_lowerCamelCase : List[str] = test_model(lowercase )
_lowerCamelCase : List[Any] = PostForwardHook()
add_hook_to_module(lowercase , lowercase )
_lowerCamelCase : Union[str, Any] = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_lowerCamelCase : str = PostForwardHook()
add_hook_to_module(lowercase , lowercase )
_lowerCamelCase : List[str] = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCamelCase : Optional[int] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(lowercase , lowercase )
_lowerCamelCase : List[Any] = test_model(lowercase )
assert torch.allclose(lowercase , output + 2 , atol=1E-5 )
    def test_no_grad_in_hook( self ):
_lowerCamelCase : str = ModelForTest()
_lowerCamelCase : Dict = torch.randn(2 , 3 )
_lowerCamelCase : int = test_model(lowercase )
_lowerCamelCase : List[str] = PostForwardHook()
add_hook_to_module(lowercase , lowercase )
_lowerCamelCase : Any = test_model(lowercase )
self.assertTrue(torch.allclose(lowercase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = test_model(lowercase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
    def test_align_devices_as_model_parallelism( self ):
_lowerCamelCase : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_lowerCamelCase : Dict = torch.randn(2 , 3 )
_lowerCamelCase : List[str] = model(lowercase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowercase , AlignDevicesHook(io_same_device=lowercase ) )
_lowerCamelCase : List[str] = torch.randn(2 , 3 ).to(0 )
_lowerCamelCase : Optional[Any] = model(lowercase )
self.assertEqual(output.device , torch.device(0 ) )
    def test_align_devices_as_cpu_offload( self ):
_lowerCamelCase : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_lowerCamelCase : Optional[Any] = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCamelCase : Tuple = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , lowercase )
_lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
_lowerCamelCase : Dict = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
_lowerCamelCase : Any = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_lowerCamelCase : Tuple = torch.randn(2 , 3 )
_lowerCamelCase : str = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
    def test_attach_align_device_hook_as_cpu_offload( self ):
_lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_lowerCamelCase : Union[str, Any] = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(lowercase , execution_device=lowercase , offload=lowercase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCamelCase : str = torch.device(lowercase )
self.assertEqual(model.batchnorm.running_mean.device , lowercase )
_lowerCamelCase : str = torch.randn(2 , 3 )
_lowerCamelCase : Any = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowercase , execution_device=lowercase , offload=lowercase , offload_buffers=lowercase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_lowerCamelCase : List[str] = torch.randn(2 , 3 )
_lowerCamelCase : str = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map( self ):
_lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_lowerCamelCase : Any = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
lowercase , execution_device=lowercase , offload=lowercase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCamelCase : List[Any] = torch.device(lowercase )
self.assertEqual(model.batchnorm.running_mean.device , lowercase )
_lowerCamelCase : Dict = torch.randn(2 , 3 )
_lowerCamelCase : Optional[Any] = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowercase , execution_device=lowercase , offload=lowercase , weights_map=model.state_dict() , offload_buffers=lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_lowerCamelCase : Any = torch.randn(2 , 3 )
_lowerCamelCase : Any = model(lowercase )
self.assertEqual(output.device , lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
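# Minimal sketch of the hook API exercised above: wrapping a module routes every
# forward pass through the hook's pre_forward/post_forward callbacks.
#   model = ModelForTest()
#   add_hook_to_module(model, PostForwardHook())  # forward output becomes output + 1
#   remove_hook_from_module(model)                # restores the original forward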
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': LlamaModel,
            '''text-classification''': LlamaForSequenceClassification,
            '''text-generation''': LlamaForCausalLM,
            '''zero-shot''': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_llama_sequence_classification_model( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_single_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
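# Typical invocation for this test file (an assumption about the surrounding
# repository layout, not stated in the file itself): the slow integration tests
# above are skipped unless the RUN_SLOW environment variable is set, e.g.
#   RUN_SLOW=1 pytest tests/models/llama/test_modeling_llama.py -k Integration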
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _SCREAMING_SNAKE_CASE ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : list[int] , __snake_case : int , ):
'''simple docstring'''
lowercase , lowercase = coefficient_matrix.shape
lowercase , lowercase = constant_matrix.shape
if rowsa != colsa:
lowercase = f'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(__snake_case )
if colsa != 1:
lowercase = f'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(__snake_case )
if rowsa != rowsa:
lowercase = (
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
f'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(__snake_case )
if len(__snake_case ) != rowsa:
lowercase = (
'Number of initial values must be equal to number of rows in coefficient '
f'matrix but received {len(__snake_case )} and {rowsa}'
)
raise ValueError(__snake_case )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
lowercase = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
lowercase , lowercase = table.shape
strictly_diagonally_dominant(__snake_case )
# Iterates the whole matrix for given number of times
for _ in range(__snake_case ):
lowercase = []
for row in range(__snake_case ):
lowercase = 0
for col in range(__snake_case ):
if col == row:
lowercase = table[row][col]
elif col == cols - 1:
lowercase = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowercase = (temp + val) / denom
new_val.append(__snake_case )
lowercase = new_val
return [float(__snake_case ) for i in new_val]
def _SCREAMING_SNAKE_CASE ( __snake_case : NDArray[floataa] ):
'''simple docstring'''
lowercase , lowercase = table.shape
lowercase = True
for i in range(0 , __snake_case ):
lowercase = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
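# A minimal usage sketch (the 3x3 system below is illustrative, not part of the
# original file): the coefficient matrix is strictly diagonally dominant, so the
# iteration converges toward the true solution.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))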
"""simple docstring"""
import math
import unittest
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
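# Worked example of the 6k +/- 1 loop above (added for illustration): for
# number = 97, sqrt(97) ~ 9.85, so the loop only has to test i = 5; since
# 97 % 5 != 0 and 97 % 7 != 0, is_prime(97) returns True.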
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
def match_pattern(a: str, b: str) -> bool:
    """
    Dynamic-programming check of whether string ``a`` can be abbreviated to
    string ``b`` by upper-casing some of its lowercase letters and deleting
    the remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
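# A small guarded demo (inputs chosen for illustration; they are not from the
# original file): "daBcd" can become "ABC" by upper-casing 'a' and 'c' and
# deleting the remaining lowercase letters, while "dBcd" cannot.
if __name__ == "__main__":
    print(match_pattern("daBcd", "ABC"))  # True
    print(match_pattern("dBcd", "ABC"))  # False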
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_12,
"bert-large-uncased": 5_12,
"bert-base-cased": 5_12,
"bert-large-cased": 5_12,
"bert-base-multilingual-uncased": 5_12,
"bert-base-multilingual-cased": 5_12,
"bert-base-chinese": 5_12,
"bert-base-german-cased": 5_12,
"bert-large-uncased-whole-word-masking": 5_12,
"bert-large-cased-whole-word-masking": 5_12,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_12,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_12,
"bert-base-cased-finetuned-mrpc": 5_12,
"bert-base-german-dbmdz-cased": 5_12,
"bert-base-german-dbmdz-uncased": 5_12,
"TurkuNLP/bert-base-finnish-cased-v1": 5_12,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_12,
"wietsedv/bert-base-dutch-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its settings disagree with the arguments
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
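# Usage sketch (illustrative, assumes Hub access): a pair encoding produced by
# the methods above is [CLS] tokens_a [SEP] tokens_b [SEP], with token_type_ids
# 0 for the first segment (including [CLS] and its [SEP]) and 1 for the second:
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tok("hello", "world")
#   # enc["token_type_ids"] -> [0, 0, 0, 1, 1]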
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with ``prec`` random witnesses."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an exact int (the original "/=" silently produced a float)
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign ``val`` to the range [a, b] in O(log n) using lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over the range [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
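# Expected output of the demo above (worked out by hand over 1-indexed ranges;
# added here as a reading aid, not from the original file):
#   query(4, 6)  -> 7    (max of 7, 3, -5)
#   query(7, 11) -> 14   (max of 6, 11, -20, 9, 14)
#   query(7, 12) -> 15   (the range now includes 15)
#   after update(1..3 -> 111), query(1, 15) -> 111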
"""
CLIPTokenizer subclass for multi-vector textual inversion: a single placeholder
token can be mapped to several consecutive sub-tokens.
"""
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
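# Usage sketch (the placeholder name and checkpoint are hypothetical; requires
# CLIP vocab files to actually run):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=3)
#   # "<cat-toy>" now maps to ["<cat-toy>_0", "<cat-toy>_1", "<cat-toy>_2"], and
#   # tokenizer("a photo of <cat-toy>") expands the placeholder before encoding.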
"""Check whether an integer reads the same forwards and backwards."""


def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy: int = num
    rev_num: int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
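# Worked example of the digit-reversal loop above (added for illustration): for
# num = 121 the loop produces rev_num = 1, 12, 121 while num shrinks to 12, 1, 0,
# so is_palindrome(121) is True; is_palindrome(-121) is False because negatives
# are rejected up front.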
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __snake_case ( __lowerCAmelCase ):
a__ = DistilBertTokenizer
a__ = DistilBertTokenizerFast
a__ = True
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Tuple = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
a__: List[str] = tokenizer.encode('sequence builders' , add_special_tokens=lowercase)
a__: Dict = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase)
a__: List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase)
a__: Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
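# How the lazy module above behaves (a sketch, not part of the original file):
# attributes are resolved on first access, so
#   from transformers.models.distilbert import DistilBertModel
# only imports the torch-backed modeling code at that point, and the
# OptionalDependencyNotAvailable handling keeps the import working (with the
# heavy symbols simply absent) when torch, TF, or Flax is not installed.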
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> str:
return "".join(chr(ord(__UpperCAmelCase ) - 32 ) if "a" <= char <= "z" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
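# Quick check of the ASCII shift above (added for illustration):
# ord("a") - ord("A") == 32, so upper("hello world") returns "HELLO WORLD",
# and non-letter characters such as the space pass through unchanged.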
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
_A = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase = "mumbai" ) -> Generator[tuple[str, str], None, None]:
SCREAMING_SNAKE_CASE__ = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
SCREAMING_SNAKE_CASE__ = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
SCREAMING_SNAKE_CASE__ = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # note: the original script tested `if "nyu" or "midas" in checkpoint_url`,
    # which is always true; both substrings are checked explicitly here
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Copy/paste/tweak the original DPT weights into the Hugging Face DPT structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
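# Example invocation (the script filename and output path are placeholders, not
# stated in the file itself):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-dump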
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class _lowerCAmelCase ( a ):
"""simple docstring"""
def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
if tokenize_kwargs is None:
lowerCAmelCase__ :List[Any] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
lowerCAmelCase__ :List[str] = truncation
lowerCAmelCase__ :Union[str, Any] = tokenize_kwargs
lowerCAmelCase__ :List[str] = {}
if return_tensors is not None:
lowerCAmelCase__ :List[str] = return_tensors
return preprocess_params, {}, postprocess_params
def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.framework
lowerCAmelCase__ :Optional[Any] = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
return model_inputs
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.model(**__UpperCAmelCase )
return model_outputs
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
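# Usage sketch (the model name is illustrative): instantiated through the
# pipeline factory, this returns the final hidden states as nested Python lists:
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("hello world")  # nested lists of shape [1, seq_len, hidden_size]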
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
@property
def snake_case__( self : Optional[int] ) ->int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case__( self : Optional[int] ) ->str:
return self._src_lang
@src_lang.setter
def snake_case__( self : Any , _UpperCamelCase : str ) ->None:
snake_case_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ) ->Dict:
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Dict ) ->None:
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__( self : Any ) ->Dict:
snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__( self : List[Any] , _UpperCamelCase : str ) ->List[str]:
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__( self : List[Any] , _UpperCamelCase : int ) ->str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: [src_lang_code] then text then [eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: [tgt_lang_code] then text then [eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
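# Illustrative usage sketch (not part of the original file; it assumes this is the
# mBART-50 tokenizer, which the en_XX/ro_RO defaults above suggest, and the
# checkpoint name is a placeholder):
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     model_inputs = tokenizer("Hello world", return_tensors="pt")
#     # input_ids = [src_lang code] + text token ids + [eos], exactly the layout
#     # produced by build_inputs_with_special_tokens above.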
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8000,
"sample_size": 13_1072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep given the scaling factors for the clean signal and the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: split the fused projection into three equal slices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
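# Example invocation (a sketch; the script filename and output path are placeholders):
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers
#
# Passing an official name from MODELS_MAP downloads the checkpoint first; the
# converted pipeline is written out because --save defaults to True.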
# Imports
import numpy as np
class IndexCalculation:
    """
    Calculates vegetation indices from the spectral bands of an image.

    Instantiate with the available band matrices (red, green, blue, red edge,
    near-infrared) and call ``calculation`` with the name of the desired index.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
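# Illustrative usage sketch (the band values are made up):
#
#     import numpy as np
#     red = np.array([[1.0, 2.0], [3.0, 4.0]])
#     nir = np.array([[4.0, 5.0], [6.0, 7.0]])
#     cl = IndexCalculation(red=red, nir=nir)
#     cl.calculation("NDVI")   # element-wise (nir - red) / (nir + red)
#     cl.ndvi()                # same result, called directly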
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """Counts the Lychrel candidates below `limit`, i.e. the numbers that do not
    produce a palindrome within 50 iterations of reverse-and-add."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f'''{solution() = }''')
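# Worked example of the reverse-and-add process used above (the classic 349 case):
#   349  + 943  = 1292
#   1292 + 2921 = 4213
#   4213 + 3124 = 7337   <- a palindrome after three iterations, so 349 is not Lychrel.
# A number is counted as a Lychrel candidate only if 50 iterations yield no palindrome.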
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
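    # Note on the expected outputs above: the tokenizer returns one tensor per
    # Jukebox prior. Only the top-level prior is conditioned on the full lyrics;
    # the two upsamplers receive just the short (artist, genre, ...) metadata
    # prefix, which is why the second and third tensors are so short.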
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
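# Illustrative sketch (not from the original file): instantiating the default
# config and reading the derived property above.
#
#     config = SEWDConfig()
#     config.inputs_to_logits_ratio  # product of conv_stride = 5 * 2**6 = 320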
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculates the turnaround time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the process is not finished; if it is 1, it has been performed.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turnaround time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculates the waiting time of each process: turnaround time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
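# The selection rule implemented above is the HRRN response ratio:
#   response_ratio = (waiting_time + burst_time) / burst_time
# e.g. a process that has waited 6 units and needs 3 more scores (6 + 3) / 3 = 3.0,
# so it is preferred over a fresh arrival whose ratio is 1.0. This favors short
# jobs while still preventing long jobs from starving.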
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
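# Example invocation (a sketch; the script filename and paths are placeholders):
#
#   python convert_vae_to_onnx.py \
#       --model_path ./stable-diffusion-checkpoint --output_path ./onnx-out --opset 14
#
# The exported graph takes a `latent_sample` with dynamic batch/channels/height/width
# axes and returns the decoded `sample`, as declared in dynamic_axes above.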
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
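# With this pattern, importing the TAPAS package stays cheap: the module object is
# replaced by a _LazyModule, and the heavy torch/TF imports only happen when one of
# the names listed in _import_structure is first accessed. The TYPE_CHECKING branch
# keeps static type checkers and IDEs aware of the real symbols.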
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 235_1563,
"""num_examples""": 1_0000,
},
{
"""name""": """validation""",
"""num_bytes""": 23_8418,
"""num_examples""": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
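# The namedtuple above mirrors the flags of the `datasets-cli test` command; what the
# test drives programmatically corresponds roughly to this shell invocation (a sketch):
#   datasets-cli test <dataset_loading_script_dir> --all_configs --save_infos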
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Logs metrics and saves them to `{output_dir}/{split}_results.json`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
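# A minimal launch sketch (hypothetical paths and checkpoint; the flag names
# mirror the dataclass fields that main() reads above):
#
#   python finetune_trainer.py \
#     --model_name_or_path sshleifer/student_marian_en_ro_6_1 \
#     --data_dir ./wmt_en_ro --output_dir ./output --task translation \
#     --n_train 500 --n_val 500 \
#     --do_train --do_eval --predict_with_generate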
| 653
| 0
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
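# Usage sketch for the helper above with toy shapes (values are illustrative;
# pad_token_id defaults to 1 in the tester below). Every mask that is not
# supplied explicitly is filled in with a sensible default:
#
#     cfg = MaMaaaConfig(vocab_size=99, d_model=16, encoder_layers=2, decoder_layers=2,
#                        encoder_attention_heads=2, decoder_attention_heads=2)
#     ids = torch.full((1, 7), 5, dtype=torch.long)
#     feed = prepare_mam_aaa_inputs_dict(cfg, ids, ids)
#     # feed now holds input_ids, decoder_input_ids, both attention masks and
#     # all three head masks.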
class MaMaaaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, encoder_layerdrop=self.encoder_layerdrop, decoder_layerdrop=self.decoder_layerdrop, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_a = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_a = decoder(
            input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0]

        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 228
|
"""simple docstring"""
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int = None) -> str:
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(f'The generated key is : {bbaa(8, seed=0)}')
from doctest import testmod
testmod()
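# Usage sketch: with a fixed simulator seed the key is deterministic, so the
# call below always returns the same 16-bit string (exact bits depend on qiskit).
#
#     k = bbaa(key_len=16, seed=0)
#     assert len(k) == 16 and set(k) <= {"0", "1"}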
| 680
| 0
|
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight" )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias" )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}" )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight" )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias" )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                self.out_projs.append(
                    self.add_weight(
                        shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}" ) )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight" )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias" )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
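# Minimal sketch of the `_gather_logprob` trick above: for each row i of a
# [batch, vocab] log-prob matrix, pick the entry at column target[i] via
# tf.gather_nd. Names below are illustrative, not part of the layer.
if __name__ == "__main__":
    _logprob = tf.math.log(tf.constant([[0.7, 0.3], [0.2, 0.8]]))
    _target = tf.constant([0, 1])
    _idx = tf.stack([tf.range(2, dtype=_target.dtype), _target], axis=1)
    print(tf.gather_nd(_logprob, _idx))  # ~[log(0.7), log(0.8)]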
| 704
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
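# Interactive smoke test mirroring `test_model_from_pretrained` above (needs
# network access to fetch the checkpoint; the trailing dimension shown is an
# assumption based on bert-base-cased's hidden size of 768):
#
#     model = FlaxBertModel.from_pretrained("bert-base-cased")
#     outputs = model(np.ones((1, 1), dtype="i4"))
#     outputs[0].shape  # (1, 1, 768)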
| 441
| 0
|
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level() -> int:
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
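# Usage sketch for the helpers above:
#
#     set_verbosity_info()            # or set_verbosity(log_levels["debug"])
#     logger = get_logger(__name__)
#     logger.info("datasets logging configured")
#     disable_progress_bar()          # tqdm(...) now returns an EmptyTqdm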
| 51
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[int] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
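# Usage sketch: like any PretrainedConfig subclass, the config above accepts
# keyword overrides and round-trips through a plain dict.
#
#     config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128)
#     assert config.to_dict()["hidden_size"] == 128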
| 51
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
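# Usage sketch: the attribute_map above aliases the generic config names onto
# the GPT-2 style ones, so the same value is visible under both spellings.
#
#     config = ImageGPTConfig()
#     assert config.hidden_size == config.n_embd == 512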
| 396
|
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
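# Worked examples (hand-checked): the window of the bidirectional passes
# shrinks from both ends until a full pass makes no swap.
#
#     cocktail_shaker_sort([4, 5, 2, 1, 2])        # -> [1, 2, 2, 4, 5]
#     cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])   # -> [-4, 0, 1, 2, 5, 11]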
| 396
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
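# Usage sketch (the checkpoint name is an assumption, not pinned by this file):
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     batch = processor(text=["a photo of a cat"], images=[image], return_tensors="pt")
#     # -> input_ids / attention_mask from the tokenizer plus pixel_values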
| 43
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float):
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149
| 0
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_="replace" ,lowerCamelCase_="<s>" ,lowerCamelCase_="</s>" ,lowerCamelCase_="</s>" ,lowerCamelCase_="<s>" ,lowerCamelCase_="<unk>" ,lowerCamelCase_="<pad>" ,lowerCamelCase_="<mask>" ,lowerCamelCase_=False ,**lowerCamelCase_ ,) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase__ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase__ : Any = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase__ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase__ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase__ : List[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase__ : List[str] = json.load(lowerCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : Any = errors # how to handle errors in decoding
UpperCAmelCase__ : Optional[int] = bytes_to_unicode()
UpperCAmelCase__ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding='''utf-8''' ) as merges_handle:
UpperCAmelCase__ : Any = merges_handle.read().split('''\n''' )[1:-1]
UpperCAmelCase__ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ : Any = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ : Optional[Any] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(''' ''' ) )
return bpe_tokens
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> str:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ''''''.join(lowerCamelCase_ )
UpperCAmelCase__ : Any = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' ,errors=self.errors )
return text
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ : Optional[int] = os.path.join(
lowerCamelCase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase__ : Optional[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase_ ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + '''\n''' )
UpperCAmelCase__ : str = 0
with open(lowerCamelCase_ ,'''w''' ,encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
UpperCAmelCase__ : str = token_index
writer.write(''' '''.join(lowerCamelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
UpperCAmelCase__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase__ : Dict = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=False ,**lowerCamelCase_ ) -> Any:
'''simple docstring'''
UpperCAmelCase__ : int = kwargs.pop('''add_prefix_space''' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase__ : Any = ''' ''' + text
return (text, kwargs)
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,) -> dict:
'''simple docstring'''
UpperCAmelCase__ : str = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase__ : Dict = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase__ : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase__ : List[str] = len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase__ : List[str] = len(lowerCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase__ : Union[str, Any] = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase__ : Optional[int] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
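# Minimal, framework-free sketch of the `global_attention_mask` padding rule in
# `_pad` above: pad with -1 ("local attention"), on the configured side.
#
#     mask, diff = [0, 0, 1], 2
#     mask + [-1] * diff      # padding_side == "right" -> [0, 0, 1, -1, -1]
#     [-1] * diff + mask      # padding_side == "left"  -> [-1, -1, 0, 0, 1]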
| 716
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __UpperCamelCase( _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
return token
def __UpperCamelCase( ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCamelCase( _A : List[Any] , _A : Dict , _A : str , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = '''imagenet-1k-id2label.json'''
UpperCAmelCase__ : str = 10_00
UpperCAmelCase__ : str = '''huggingface/label-files'''
UpperCAmelCase__ : List[Any] = num_labels
UpperCAmelCase__ : Tuple = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='''dataset''' ) ) , '''r''' ) )
UpperCAmelCase__ : Any = {int(_A ): v for k, v in idalabel.items()}
UpperCAmelCase__ : List[str] = idalabel
UpperCAmelCase__ : Any = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ : int = CvtConfig(num_labels=_A , idalabel=_A , labelaid=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
UpperCAmelCase__ : Tuple = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
UpperCAmelCase__ : List[str] = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
UpperCAmelCase__ : Optional[int] = [2, 2, 20]
UpperCAmelCase__ : str = [3, 12, 16]
UpperCAmelCase__ : Union[str, Any] = [1_92, 7_68, 10_24]
UpperCAmelCase__ : Optional[int] = CvtForImageClassification(_A )
UpperCAmelCase__ : Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
UpperCAmelCase__ : Dict = image_size
UpperCAmelCase__ : Union[str, Any] = torch.load(_A , map_location=torch.device('''cpu''' ) )
UpperCAmelCase__ : Union[str, Any] = OrderedDict()
UpperCAmelCase__ : str = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
UpperCAmelCase__ : Optional[int] = list_of_state_dict + cls_token(_A )
UpperCAmelCase__ : Union[str, Any] = list_of_state_dict + embeddings(_A )
for cnt in range(config.depth[idx] ):
UpperCAmelCase__ : str = list_of_state_dict + attention(_A , _A )
UpperCAmelCase__ : int = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_A )
for i in range(len(_A ) ):
UpperCAmelCase__ : List[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase__ : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
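# The conversion above is driven purely by (new_name, old_name) pairs. A
# standalone sketch of that remapping pattern (function and argument names
# here are illustrative; OrderedDict is imported at the top of this script):
def remap_state_dict(original_weights, rename_pairs):
    # Copy each tensor from its old key to its new key, preserving pair order.
    remapped = OrderedDict()
    for new_key, old_key in rename_pairs:
        remapped[new_key] = original_weights[old_key]
    return remapped


# remap_state_dict({"norm.weight": 1}, [("layernorm.weight", "norm.weight")])
# -> OrderedDict([('layernorm.weight', 1)])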
| 496
| 0
|
"""simple docstring"""
class UpperCAmelCase :
def __init__( self : Union[str, Any] , __lowerCamelCase : int ):
"""simple docstring"""
_snake_case = size
_snake_case = [0] * size
_snake_case = [0] * size
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return index | (index + 1)
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return (index & (index + 1)) - 1
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
_snake_case = value
while index < self.size:
_snake_case = self.get_prev(__lowerCamelCase ) + 1
if current_left_border == index:
_snake_case = value
else:
_snake_case = max(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_snake_case = self.get_next(__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
        right -= 1  # because `right` is exclusive
_snake_case = 0
while left <= right:
_snake_case = self.get_prev(__lowerCamelCase )
if left <= current_left:
_snake_case = max(__lowerCamelCase , self.tree[right] )
_snake_case = current_left
else:
_snake_case = max(__lowerCamelCase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
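# A readable re-expression of the structure above with descriptive names.
# Illustrative only: it assumes point updates never decrease a stored value,
# so each tree node can be maintained with a running max.
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # running maxima over implicit sections

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:  # section covers only `index`
                self.tree[index] = value
            else:
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:  # `right` is exclusive
        right -= 1
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


# Example: t = MaxFenwickTree(5); t.update(2, 7); t.query(0, 5) -> 7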
| 103
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
snake_case = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103
| 1
|
def permute(nums):
    '''simple docstring'''
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    '''simple docstring'''

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
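# Cross-check (illustrative): both implementations enumerate the same
# permutations, differing only in the order they are produced.
assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))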
| 704
|
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    '''simple docstring'''
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    '''simple docstring'''
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
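# A file-free sanity check (no lena.jpg needed, illustrative): the kernel is
# symmetric and peaks at the center, and filtering shrinks each side by k - 1.
from numpy import allclose, arange

k = gen_gaussian_kernel(3, sigma=1.0)
assert k[1, 1] == k.max() and allclose(k, k.T)
demo = arange(100, dtype="float64").reshape(10, 10)
assert gaussian_filter(demo, 3, sigma=1.0).shape == (8, 8)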
| 300
| 0
|
'''simple docstring'''
def wave(txt: str) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
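# Example (illustrative): one variant per alphabetic character; digits are skipped.
assert wave("ab1c") == ["Ab1c", "aB1c", "ab1C"]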
| 56
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
A_ : List[str] = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 196
| 0
|
"""simple docstring"""
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(['''A''', '''B''', '''C'''])
    generate_all_subsequences(seq)
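# DFS order check (illustrative): for ["A", "B"] the calls print
# [], ['B'], ['A'], ['A', 'B'] -- the "skip element" branch is explored first.
generate_all_subsequences(["A", "B"])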
| 720
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , )
assert hasattr(self , """env""")
def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]):
"""simple docstring"""
TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
_SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_SCREAMING_SNAKE_CASE : int = (
Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
| 635
| 0
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=24 , lowercase__=2 , lowercase__=6 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=None , lowercase__=1000 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : Any = seq_length
SCREAMING_SNAKE_CASE_ : List[str] = is_training
SCREAMING_SNAKE_CASE_ : str = use_input_mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Any = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : int = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = scope
SCREAMING_SNAKE_CASE_ : Optional[int] = range_bbox
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE_ : int = bbox[i, j, 3]
SCREAMING_SNAKE_CASE_ : Optional[Any] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE_ : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE_ : int = bbox[i, j, 2]
SCREAMING_SNAKE_CASE_ : int = bbox[i, j, 0]
SCREAMING_SNAKE_CASE_ : Any = t
SCREAMING_SNAKE_CASE_ : str = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCamelCase ( self ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = LiltModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase__ , bbox=lowercase__ , token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE_ : Any = model(lowercase__ , bbox=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = LiltForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(
lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = LiltForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(
lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,_UpperCAmelCase,_UpperCAmelCase,unittest.TestCase ):
_A = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_A = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_A = False
_A = False
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
return True
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = LiltModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def __lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = type
self.model_tester.create_and_check_model(*lowercase__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
@slow
def __lowerCamelCase ( self ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : str = LiltModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(lowercase__ )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1, 2]] , device=lowercase__ )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=lowercase__ , bbox=lowercase__ )
SCREAMING_SNAKE_CASE_ : str = torch.Size([1, 2, 768] )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase__ , )
self.assertTrue(outputs.last_hidden_state.shape , lowercase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase__ , atol=1e-3 ) )
| 421
|
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        """simple docstring"""
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        """simple docstring"""
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        """simple docstring"""
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        """simple docstring"""
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        """simple docstring"""
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        """simple docstring"""
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        """simple docstring"""
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        """simple docstring"""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self):
        """simple docstring"""
        return self.version_str
def _str_to_version_tuple(version_str):
    """simple docstring"""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """simple docstring"""
    return ".".join(str(v) for v in version_tuple)
| 421
| 1
|
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    """simple docstring"""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f'''{solution() = }''')
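# Worked checks (illustrative): solution(2) -> 3 blue of 4 discs, since
# (3/4) * (2/3) = 1/2; solution(100) -> 85 blue of 120 discs.
assert solution(2) == 3
assert solution(100) == 85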
| 718
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
def A ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
UpperCamelCase = DDIMScheduler()
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
UpperCamelCase = CLIPTextModel(UpperCamelCase__ )
UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=0 ):
"""simple docstring"""
UpperCamelCase = torch.manual_seed(UpperCamelCase__ )
UpperCamelCase = {
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = sd_pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : Optional[Any] ):
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def A ( self : Any ):
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3 )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = 'french fries'
UpperCamelCase = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = sd_pipe(**UpperCamelCase__ , view_batch_size=2 )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = sd_pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , skip_prk_steps=UpperCamelCase__ )
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = sd_pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[str] , UpperCamelCase__ : Optional[int]=0 ):
"""simple docstring"""
UpperCamelCase = torch.manual_seed(UpperCamelCase__ )
UpperCamelCase = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = 'stabilityai/stable-diffusion-2-base'
UpperCamelCase = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='scheduler' )
UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
UpperCamelCase = self.get_inputs()
UpperCamelCase = pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
UpperCamelCase = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=UpperCamelCase__ )
UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
UpperCamelCase = self.get_inputs()
UpperCamelCase = pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
UpperCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = 0
def callback_fn(UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor ) -> None:
UpperCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
UpperCamelCase = latents[0, -3:, -3:, -1]
UpperCamelCase = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
UpperCamelCase = latents[0, -3:, -3:, -1]
UpperCamelCase = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
UpperCamelCase = False
UpperCamelCase = 'stabilityai/stable-diffusion-2-base'
UpperCamelCase = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='scheduler' )
UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
UpperCamelCase = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
UpperCamelCase = self.get_inputs()
pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A ( self : Optional[int] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = 'stabilityai/stable-diffusion-2-base'
UpperCamelCase = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='scheduler' )
UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
UpperCamelCase = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase = self.get_inputs()
UpperCamelCase = pipe(**UpperCamelCase__ )
UpperCamelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 1_0**9
| 324
| 0
|
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , reference_urls=[] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=False , ) -> Any:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
UpperCamelCase = np.array([re.sub(lowerCamelCase_ , '''''' , lowerCamelCase_) for x in predictions])
UpperCamelCase = np.array([re.sub(lowerCamelCase_ , '''''' , lowerCamelCase_) for x in references])
else:
UpperCamelCase = np.asarray(lowerCamelCase_)
UpperCamelCase = np.asarray(lowerCamelCase_)
if ignore_case:
UpperCamelCase = np.char.lower(lowerCamelCase_)
UpperCamelCase = np.char.lower(lowerCamelCase_)
if ignore_punctuation:
UpperCamelCase = string.punctuation.maketrans('''''' , '''''' , string.punctuation)
UpperCamelCase = np.char.translate(lowerCamelCase_ , table=lowerCamelCase_)
UpperCamelCase = np.char.translate(lowerCamelCase_ , table=lowerCamelCase_)
if ignore_numbers:
UpperCamelCase = string.digits.maketrans('''''' , '''''' , string.digits)
UpperCamelCase = np.char.translate(lowerCamelCase_ , table=lowerCamelCase_)
UpperCamelCase = np.char.translate(lowerCamelCase_ , table=lowerCamelCase_)
UpperCamelCase = predictions == references
return {"exact_match": np.mean(lowerCamelCase_) * 1_0_0}
| 34
|
def factorial(num):
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nums = factorial(num)
    result = split_and_add(nums)
    return result


if __name__ == "__main__":
    print(solution(int(input("""Enter the Number: """).strip())))
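# Worked check (illustrative): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert split_and_add(factorial(10)) == 27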
| 23
| 0
|
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 712
|
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["""""".join(row) for row in temp_grid]
    output_string = """""".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("""*""")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = """"""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest
    doctest.testmod()
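# Round-trip check (illustrative): the classic key-3 example encrypts
# "WEAREDISCOVEREDFLEEATONCE" to "WECRLTEERDSOEEFEAOCAIVDEN" and back.
message = "WEAREDISCOVEREDFLEEATONCE"
assert encrypt(message, 3) == "WECRLTEERDSOEEFEAOCAIVDEN"
assert decrypt(encrypt(message, 3), 3) == message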
| 348
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
a_ = True
@register_to_config
def __init__( self : Tuple , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (6_4,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : int = 3_2 , lowerCAmelCase_ : int = 3_2 , lowerCAmelCase_ : float = 0.1_82_15 , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
__lowerCAmelCase = Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
# pass init params to Decoder
__lowerCAmelCase = Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , )
__lowerCAmelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__lowerCAmelCase = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
__lowerCAmelCase = False
__lowerCAmelCase = False
# only relevant if vae tiling is enabled
__lowerCAmelCase = self.config.sample_size
__lowerCAmelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__lowerCAmelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__lowerCAmelCase = 0.25
def lowercase ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any]=False ) -> int:
if isinstance(lowerCAmelCase_ , (Encoder, Decoder) ):
__lowerCAmelCase = value
def lowercase ( self : Dict , lowerCAmelCase_ : bool = True ) -> Optional[int]:
__lowerCAmelCase = use_tiling
def lowercase ( self : Optional[Any] ) -> Any:
self.enable_tiling(lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = True
def lowercase ( self : Dict ) -> Optional[int]:
__lowerCAmelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase ( self : List[str] ) -> Dict[str, AttentionProcessor]:
__lowerCAmelCase = {}
def fn_recursive_add_processors(lowerCAmelCase_ : str , lowerCAmelCase_ : torch.nn.Module , lowerCAmelCase_ : Dict[str, AttentionProcessor] ):
if hasattr(lowerCAmelCase_ , 'set_processor' ):
__lowerCAmelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCAmelCase_ , lowerCAmelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return processors
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> str:
__lowerCAmelCase = len(self.attn_processors.keys() )
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            # Linearly blend the bottom edge of tile `a` into the top edge of tile `b`.
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            # Linearly blend the right edge of tile `a` into the left edge of tile `b`.
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
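# Illustrative usage sketch (assumes the standard diffusers entry points; the
# checkpoint name is just a common example, not mandated by this file):
if __name__ == "__main__":
    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    vae.enable_tiling()  # enables use_tiling, so encode()/decode() take the tiled paths above
    with torch.no_grad():
        image = torch.randn(1, 3, 1024, 1024)
        posterior = vae.encode(image).latent_dist
        reconstruction = vae.decode(posterior.mode()).sample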
| 53
|
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 269
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
__a : List[str] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
__a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
__a : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__a : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a : List[Any] = 1
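        # Worked example of the offset: with fairseq_offset = 1, spm id 3 (",")
        # maps to fairseq id 4, matching the table above, while ids 0-3 are served
        # from the fairseq token mapping rather than the spm model.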
__a : Dict = len(self.sp_model )
__a : Any = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase )
}
__a : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
__a : Optional[int] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__a : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__a : List[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__a : Any = src_lang if src_lang is not None else '''en_XX'''
__a : List[Any] = self.lang_code_to_id[self._src_lang]
__a : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
__a : Dict = self.__dict__.copy()
__a : str = None
__a : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _UpperCAmelCase ):
__a : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a : str = {}
__a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _lowerCamelCase ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowerCamelCase ( self ):
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__a : List[str] = [1] * len(self.prefix_tokens )
__a : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
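        # Example: for MBart the prefix is empty and the suffix is [eos, src_lang_code],
        # so a single encoded sequence has the layout `token_ids + [</s>, en_XX]`.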
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Union[str, Any] = [self.sep_token_id]
__a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a : Optional[int] = src_lang
__a : List[Any] = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
__a : Any = self.convert_tokens_to_ids(_UpperCAmelCase )
__a : Any = tgt_lang_id
return inputs
def _lowerCamelCase ( self ):
__a : int = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : Any = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCamelCase ( self , _UpperCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , '''wb''' ) as fi:
__a : str = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = "en_XX" , _UpperCAmelCase = None , _UpperCAmelCase = "ro_RO" , **_UpperCAmelCase , ):
__a : List[Any] = src_lang
__a : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
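# Illustrative usage sketch (facebook/mbart-large-en-ro is the reference checkpoint
# listed in the vocab map above; the sentence is an arbitrary example):
#
#     tokenizer = MBartTokenizer.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria",
#                       return_tensors="pt")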
| 703
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
def _lowerCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__a : str = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__a : List[Any] = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
__a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
def _lowerCamelCase ( self , _UpperCAmelCase ):
return "lower newer", "lower newer"
def _lowerCamelCase ( self ):
__a : Tuple = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
__a : Tuple = '''lower'''
__a : Union[str, Any] = ['''low''', '''er</w>''']
__a : str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : List[Any] = tokens + ['''<unk>''']
__a : Tuple = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# Simple input
__a : str = '''This is a simple input'''
__a : Dict = ['''This is a simple input 1''', '''This is a simple input 2''']
__a : List[Any] = ('''This is a simple input''', '''This is a pair''')
__a : Optional[Any] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , )
def _lowerCamelCase ( self ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
| 101
| 0
|
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 is in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 195
|
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the falling product u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    # initialise an n x n forward-difference table with zeros
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
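# The summation in main() is Newton's forward-difference formula:
#   f(x0 + u*h) ≈ y0 + u*Δy0 + u(u-1)/2!*Δ²y0 + ... + u(u-1)...(u-i+1)/i!*Δ^i y0,
# with u = (value - x[0]) / (x[1] - x[0]); ucal(u, i) supplies the falling product.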
| 267
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
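# Illustrative sketch (the parameter values below are hypothetical, chosen only to
# show the config surface; microsoft/trocr-base-handwritten above is a real checkpoint):
#
#     config = TrOCRConfig(d_model=512, decoder_layers=6, decoder_attention_heads=8)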
| 18
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
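# Worked example with the defaults above: hop_length = prod([8, 5, 4, 2]) = 320,
# so frame_rate = ceil(24_000 / 320) = 75 and
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.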
| 18
| 1
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution over a logits Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
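# Why this equals Shannon entropy: with p_i = exp(x_i) / A and A = sum_i exp(x_i),
#   -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log A) = log(A) - B / A,
# which is exactly the quantity returned above.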
class DeeBertEncoder(nn.Module):
def __init__( self : Tuple,__A : List[Any] ):
super().__init__()
_lowerCamelCase : Optional[int] = config.output_attentions
_lowerCamelCase : Any = config.output_hidden_states
_lowerCamelCase : str = nn.ModuleList([BertLayer(__A ) for _ in range(config.num_hidden_layers )] )
_lowerCamelCase : str = nn.ModuleList([BertHighway(__A ) for _ in range(config.num_hidden_layers )] )
_lowerCamelCase : List[str] = [-1 for _ in range(config.num_hidden_layers )]
def lowerCamelCase_ ( self : List[Any],__A : int ):
if (type(__A ) is float) or (type(__A ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCamelCase : List[Any] = x
else:
_lowerCamelCase : List[str] = x
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, Any] ):
_lowerCamelCase : List[str] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowerCamelCase_ ( self : List[str],__A : int,__A : Any=None,__A : Any=None,__A : int=None,__A : Optional[int]=None,):
_lowerCamelCase : Tuple = ()
_lowerCamelCase : Tuple = ()
_lowerCamelCase : Optional[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCamelCase : int = all_hidden_states + (hidden_states,)
_lowerCamelCase : List[Any] = layer_module(
__A,__A,head_mask[i],__A,__A )
_lowerCamelCase : Dict = layer_outputs[0]
if self.output_attentions:
_lowerCamelCase : str = all_attentions + (layer_outputs[1],)
_lowerCamelCase : List[str] = (hidden_states,)
if self.output_hidden_states:
_lowerCamelCase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCamelCase : Dict = current_outputs + (all_attentions,)
_lowerCamelCase : Any = self.highway[i](__A )
# logits, pooled_output
if not self.training:
_lowerCamelCase : int = highway_exit[0]
_lowerCamelCase : Optional[Any] = entropy(__A )
_lowerCamelCase : Tuple = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCamelCase : List[str] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCamelCase : int = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__A,i + 1 )
else:
_lowerCamelCase : int = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCamelCase : Optional[Any] = all_hidden_states + (hidden_states,)
_lowerCamelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCamelCase : int = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCamelCase : Optional[int] = outputs + (all_attentions,)
_lowerCamelCase : Tuple = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
def __init__( self : Any,__A : int ):
super().__init__(__A )
_lowerCamelCase : str = config
_lowerCamelCase : str = BertEmbeddings(__A )
_lowerCamelCase : Tuple = DeeBertEncoder(__A )
_lowerCamelCase : List[str] = BertPooler(__A )
self.init_weights()
def lowerCamelCase_ ( self : Dict ):
self.encoder.init_highway_pooler(self.pooler )
def lowerCamelCase_ ( self : Tuple ):
return self.embeddings.word_embeddings
def lowerCamelCase_ ( self : Any,__A : Optional[int] ):
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : Tuple,__A : int ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__A )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def lowerCamelCase_ ( self : Optional[int],__A : List[str]=None,__A : int=None,__A : Union[str, Any]=None,__A : Tuple=None,__A : Dict=None,__A : int=None,__A : Dict=None,__A : List[str]=None,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
_lowerCamelCase : List[str] = input_ids.size()
elif inputs_embeds is not None:
_lowerCamelCase : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
_lowerCamelCase : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCamelCase : List[str] = torch.ones(__A,device=__A )
if encoder_attention_mask is None:
_lowerCamelCase : List[Any] = torch.ones(__A,device=__A )
if token_type_ids is None:
_lowerCamelCase : List[str] = torch.zeros(__A,dtype=torch.long,device=__A )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCamelCase : torch.Tensor = self.get_extended_attention_mask(__A,__A,__A )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCamelCase : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCamelCase : Optional[int] = encoder_attention_mask[:, None, None, :]
_lowerCamelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCamelCase : Any = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCamelCase : Union[str, Any] = self.get_head_mask(__A,self.config.num_hidden_layers )
_lowerCamelCase : List[Any] = self.embeddings(
input_ids=__A,position_ids=__A,token_type_ids=__A,inputs_embeds=__A )
_lowerCamelCase : Union[str, Any] = self.encoder(
__A,attention_mask=__A,head_mask=__A,encoder_hidden_states=__A,encoder_attention_mask=__A,)
_lowerCamelCase : Union[str, Any] = encoder_outputs[0]
_lowerCamelCase : Tuple = self.pooler(__A )
_lowerCamelCase : Optional[int] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
def __init__( self : List[Any],__A : int,__A : int ):
_lowerCamelCase : Tuple = message
_lowerCamelCase : Union[str, Any] = exit_layer # start from 1!
class BertHighway(nn.Module):
def __init__( self : int,__A : List[Any] ):
super().__init__()
_lowerCamelCase : List[Any] = BertPooler(__A )
_lowerCamelCase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowerCamelCase : Tuple = nn.Linear(config.hidden_size,config.num_labels )
def lowerCamelCase_ ( self : Tuple,__A : Union[str, Any] ):
# Pooler
_lowerCamelCase : int = encoder_outputs[0]
_lowerCamelCase : Tuple = self.pooler(__A )
# "return" pooler_output
# BertModel
_lowerCamelCase : Any = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCamelCase : List[Any] = bmodel_output[1]
_lowerCamelCase : Optional[int] = self.dropout(__A )
_lowerCamelCase : Optional[Any] = self.classifier(__A )
return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self : str,__A : Optional[int] ):
super().__init__(__A )
_lowerCamelCase : Tuple = config.num_labels
_lowerCamelCase : Dict = config.num_hidden_layers
_lowerCamelCase : Union[str, Any] = DeeBertModel(__A )
_lowerCamelCase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowerCamelCase : int = nn.Linear(config.hidden_size,self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def lowerCamelCase_ ( self : Dict,__A : Any=None,__A : Optional[Any]=None,__A : Union[str, Any]=None,__A : Union[str, Any]=None,__A : Optional[int]=None,__A : Optional[int]=None,__A : Union[str, Any]=None,__A : Optional[int]=-1,__A : Any=False,):
_lowerCamelCase : Optional[Any] = self.num_layers
try:
_lowerCamelCase : Optional[Any] = self.bert(
__A,attention_mask=__A,token_type_ids=__A,position_ids=__A,head_mask=__A,inputs_embeds=__A,)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCamelCase : Union[str, Any] = outputs[1]
_lowerCamelCase : List[Any] = self.dropout(__A )
_lowerCamelCase : str = self.classifier(__A )
_lowerCamelCase : int = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCamelCase : Tuple = e.message
_lowerCamelCase : Optional[int] = e.exit_layer
_lowerCamelCase : Optional[Any] = outputs[0]
if not self.training:
_lowerCamelCase : List[Any] = entropy(__A )
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCamelCase : List[Any] = MSELoss()
_lowerCamelCase : Tuple = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
_lowerCamelCase : Optional[int] = CrossEntropyLoss()
_lowerCamelCase : Optional[int] = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
_lowerCamelCase : Union[str, Any] = []
for highway_exit in outputs[-1]:
_lowerCamelCase : Any = highway_exit[0]
if not self.training:
highway_logits_all.append(__A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCamelCase : List[str] = MSELoss()
_lowerCamelCase : str = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
_lowerCamelCase : Optional[int] = CrossEntropyLoss()
_lowerCamelCase : str = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(__A )
if train_highway:
_lowerCamelCase : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCamelCase : Union[str, Any] = (loss,) + outputs
if not self.training:
_lowerCamelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCamelCase : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 377
| 0
|
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 702
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def a_ ( __snake_case ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase_ = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=__snake_case , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=__snake_case , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=__snake_case , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=__snake_case , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=__snake_case , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=__snake_case , type=__snake_case , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=__snake_case , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def a_ ( __snake_case ) -> Optional[int]:
'''simple docstring'''
if args.calibrator == "max":
UpperCamelCase_ = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
UpperCamelCase_ = 'histogram'
elif args.calibrator == "mse":
UpperCamelCase_ = 'histogram'
else:
raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
UpperCamelCase_ = QuantDescriptor(num_bits=args.aprec , calib_method=__snake_case )
UpperCamelCase_ = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__snake_case )
quant_nn.QuantLinear.set_default_quant_desc_weight(__snake_case )
def a_ ( __snake_case , __snake_case , __snake_case=False , __snake_case=False ) -> List[str]:
'''simple docstring'''
logger.info('Configuring Model for Quantization' )
logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__snake_case , ['embeddings'] , which='weight' , _disabled=__snake_case )
if args.quant_disable:
set_quantizer_by_name(__snake_case , [''] , _disabled=__snake_case )
if args.quant_disable_keyword:
set_quantizer_by_name(__snake_case , args.quant_disable_keyword , _disabled=__snake_case )
if args.quant_disable_layer_module:
set_quantizer_by_name(__snake_case , [r'layer.\d+.' + args.quant_disable_layer_module] , _disabled=__snake_case )
if args.quant_enable_layer_module:
set_quantizer_by_name(__snake_case , [r'layer.\d+.' + args.quant_enable_layer_module] , _disabled=__snake_case )
if args.recalibrate_weights:
recalibrate_weights(__snake_case )
if args.fuse_qkv:
fuse_qkv(__snake_case , __snake_case )
if args.clip_gelu:
clip_gelu(__snake_case , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__snake_case )
def a_ ( __snake_case ) -> Tuple:
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def a_ ( __snake_case , __snake_case ) -> Any:
'''simple docstring'''
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__snake_case )
def a_ ( __snake_case , __snake_case ) -> int:
'''simple docstring'''
def fusea(__snake_case , __snake_case , __snake_case ):
for mod in [qq, qk, qv]:
if not hasattr(__snake_case , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
UpperCamelCase_ = qq._amax.detach().item()
UpperCamelCase_ = qk._amax.detach().item()
UpperCamelCase_ = qv._amax.detach().item()
UpperCamelCase_ = max(__snake_case , __snake_case , __snake_case )
qq._amax.fill_(__snake_case )
qk._amax.fill_(__snake_case )
qv._amax.fill_(__snake_case )
logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a_ ( __snake_case , __snake_case ) -> Optional[int]:
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
UpperCamelCase_ = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__snake_case )
UpperCamelCase_ = mod._input_quantizer._amax.data.detach().item()
logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def a_ ( __snake_case ) -> Tuple:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(__snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
UpperCamelCase_ = mod.weight.shape[0]
UpperCamelCase_ = mod._weight_quantizer._amax.detach()
UpperCamelCase_ = torch.ones(__snake_case , dtype=amax.dtype , device=amax.device ) * amax
print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def a_ ( __snake_case ) -> List[Any]:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(__snake_case , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCamelCase_ = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCamelCase_ = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCamelCase_ = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__snake_case , keepdims=__snake_case ).detach()
logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
UpperCamelCase_ = amax
def a_ ( __snake_case , __snake_case=2_5 , __snake_case=1_8_0 , __snake_case=None ) -> Tuple:
'''simple docstring'''
if ignore is None:
UpperCamelCase_ = []
elif not isinstance(__snake_case , __snake_case ):
UpperCamelCase_ = [ignore]
UpperCamelCase_ = 0
for name, mod in model.named_modules():
if not hasattr(__snake_case , 'weight' ):
continue
UpperCamelCase_ = max(__snake_case , len(__snake_case ) )
for name, mod in model.named_modules():
UpperCamelCase_ = getattr(__snake_case , '_input_quantizer' , __snake_case )
UpperCamelCase_ = getattr(__snake_case , '_weight_quantizer' , __snake_case )
if not hasattr(__snake_case , 'weight' ):
continue
if type(__snake_case ) in ignore:
continue
if [True for s in ignore if type(__snake_case ) is str and s in name]:
continue
UpperCamelCase_ = F'''Act:{input_q.extra_repr()}'''
UpperCamelCase_ = F'''Wgt:{weight_q.extra_repr()}'''
UpperCamelCase_ = F'''{name:{name_width}} {act_str} {wgt_str}'''
if len(__snake_case ) <= line_width:
logger.info(__snake_case )
else:
logger.info(F'''{name:{name_width}} {act_str}''' )
logger.info(F'''{' ':{name_width}} {wgt_str}''' )
def a_ ( __snake_case ) -> List[str]:
'''simple docstring'''
UpperCamelCase_ = 0
for name, mod in model.named_modules():
if isinstance(__snake_case , pytorch_quantization.nn.TensorQuantizer ):
print(F'''{name:80} {mod}''' )
count += 1
print(F'''{count} TensorQuantizers found in model''' )
def a_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
UpperCamelCase_ = getattr(__snake_case , __snake_case , __snake_case )
if quantizer_mod is not None:
assert hasattr(__snake_case , __snake_case )
setattr(__snake_case , __snake_case , __snake_case )
else:
logger.warning(F'''{name} has no {quantizer}''' )
def a_ ( __snake_case , __snake_case , __snake_case="both" , **__snake_case ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase_ = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(__snake_case , __snake_case , '_input_quantizer' , __snake_case , __snake_case )
if which in ["weight", "both"]:
set_quantizer(__snake_case , __snake_case , '_weight_quantizer' , __snake_case , __snake_case )
logger.info(__snake_case )
def a_ ( __snake_case , __snake_case , **__snake_case ) -> str:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(__snake_case , '_input_quantizer' ) or hasattr(__snake_case , '_weight_quantizer' ):
for n in names:
if re.search(__snake_case , __snake_case ):
set_quantizers(__snake_case , __snake_case , **__snake_case )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(__snake_case , __snake_case ):
UpperCamelCase_ = F'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
setattr(__snake_case , __snake_case , __snake_case )
logger.info(__snake_case )
| 559
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 18
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_A : List[str] = logging.get_logger(__name__)
_A : List[str] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
_A : Optional[int] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def __snake_case ( lowerCAmelCase_ ) -> Tuple:
SCREAMING_SNAKE_CASE__ = {}
with open(lowerCAmelCase_ , '''r''' ) as file:
for line_number, line in enumerate(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = line.strip()
if line:
SCREAMING_SNAKE_CASE__ = line.split()
SCREAMING_SNAKE_CASE__ = line_number
SCREAMING_SNAKE_CASE__ = words[0]
SCREAMING_SNAKE_CASE__ = value
return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_A : List[str] = parser.parse_args()
_A : List[str] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
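
# Example invocation (script name and paths are hypothetical):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h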
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
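
# A sketch of the GitHub Actions job payload this expects (field values hypothetical):
#   {"name": "run_tests_torch", "started_at": "2023-05-01T12:00:00Z", "completed_at": "2023-05-01T12:42:30Z"}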
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
A_ : Optional[int] = parser.parse_args()
A_ : Any = get_job_time(args.workflow_run_id)
A_ : List[str] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v["duration"]}''')
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the 1-indexed position of the highest set bit of `number`.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from sklearn.metrics import mean_squared_error
import datasets
_lowercase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_lowercase = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_lowercase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
lowerCAmelCase__ : Union[str, Any] =[
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCAmelCase__ : Any =[0, 0]
# all coordinates are given in format [y,x]
lowerCAmelCase__ : List[str] =[len(grid) - 1, len(grid[0]) - 1]
lowerCAmelCase__ : str =1
# the cost map which pushes the path closer to the goal
lowerCAmelCase__ : Union[str, Any] =[[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCAmelCase__ : List[str] =abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCAmelCase__ : List[str] =99
lowerCAmelCase__ ,lowerCAmelCase__ : Any =search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _A ( self: Tuple ):
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_a = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self: Dict ):
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _A ( self: List[str] ):
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=__UpperCamelCase , )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
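
# These integration tests are gated behind the slow decorator; a typical invocation
# (test path hypothetical) would be:
#   RUN_SLOW=1 pytest tests/pipelines/test_stable_diffusion_k_diffusion.py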
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"
def __init__( self: int , __UpperCamelCase: str ):
if type(__UpperCamelCase ) == dict:
_a = Namespace(**__UpperCamelCase )
_a = glue_output_modes[hparams.task]
_a = glue_tasks_num_labels[hparams.task]
super().__init__(__UpperCamelCase , __UpperCamelCase , self.mode )
def _A ( self: Dict , **__UpperCamelCase: str ):
return self.model(**__UpperCamelCase )
def _A ( self: Optional[int] , __UpperCamelCase: Tuple , __UpperCamelCase: Union[str, Any] ):
_a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_a = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_a = self(**__UpperCamelCase )
_a = outputs[0]
_a = self.trainer.lr_schedulers[0]['''scheduler''']
_a = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _A ( self: Union[str, Any] ):
_a = self.hparams
_a = processors[args.task]()
_a = processor.get_labels()
for mode in ["train", "dev"]:
_a = self._feature_file(__UpperCamelCase )
if os.path.exists(__UpperCamelCase ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __UpperCamelCase )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
_a = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
_a = convert_examples_to_features(
__UpperCamelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , __UpperCamelCase )
torch.save(__UpperCamelCase , __UpperCamelCase )
def _A ( self: str , __UpperCamelCase: str , __UpperCamelCase: int , __UpperCamelCase: bool = False ):
_a = '''dev''' if mode == '''test''' else mode
_a = self._feature_file(__UpperCamelCase )
logger.info('''Loading features from cached file %s''' , __UpperCamelCase )
_a = torch.load(__UpperCamelCase )
_a = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_a = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_a = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_a = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_a = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , batch_size=__UpperCamelCase , shuffle=__UpperCamelCase , )
def _A ( self: int , __UpperCamelCase: Tuple , __UpperCamelCase: Tuple ):
_a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_a = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_a = self(**__UpperCamelCase )
_a , _a = outputs[:2]
_a = logits.detach().cpu().numpy()
_a = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _A ( self: List[str] , __UpperCamelCase: Any ):
_a = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
_a = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_a = np.argmax(__UpperCamelCase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_a = np.squeeze(__UpperCamelCase )
_a = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
_a = [[] for _ in range(out_label_ids.shape[0] )]
_a = [[] for _ in range(out_label_ids.shape[0] )]
_a = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __UpperCamelCase , __UpperCamelCase )}
_a = dict(results.items() )
_a = results
return ret, preds_list, out_label_list
def _A ( self: Any , __UpperCamelCase: list ):
_a , _a , _a = self._eval_end(__UpperCamelCase )
_a = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _A ( self: List[Any] , __UpperCamelCase: Any ):
_a , _a , _a = self._eval_end(__UpperCamelCase )
_a = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _A ( __UpperCamelCase: Optional[Any] , __UpperCamelCase: int ):
BaseTransformer.add_model_specific_args(__UpperCamelCase , __UpperCamelCase )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__UpperCamelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=__UpperCamelCase , required=__UpperCamelCase , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__UpperCamelCase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _A ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Optional[Any]="text" ):
"""simple docstring"""
if model_type == "text":
lowerCAmelCase__ = BarkSemanticModel
lowerCAmelCase__ = BarkSemanticConfig
lowerCAmelCase__ = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCAmelCase__ = BarkCoarseModel
lowerCAmelCase__ = BarkCoarseConfig
lowerCAmelCase__ = BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCAmelCase__ = BarkFineModel
lowerCAmelCase__ = BarkFineConfig
lowerCAmelCase__ = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowerCAmelCase__ = F'{model_type}_small' if use_small else model_type
lowerCAmelCase__ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCAmelCase_ ):
logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info["repo_id"] , model_info["file_name"] )
lowerCAmelCase__ = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
# this is a hack
lowerCAmelCase__ = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
lowerCAmelCase__ = model_args["vocab_size"]
lowerCAmelCase__ = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCAmelCase__ = model_args.pop("n_head" )
lowerCAmelCase__ = model_args.pop("n_embd" )
lowerCAmelCase__ = model_args.pop("n_layer" )
lowerCAmelCase__ = ConfigClass(**checkpoint["model_args"] )
lowerCAmelCase__ = ModelClass(config=lowerCAmelCase_ )
lowerCAmelCase__ = GenerationConfigClass()
lowerCAmelCase__ = model_generation_config
lowerCAmelCase__ = checkpoint["model"]
# fixup checkpoint
lowerCAmelCase__ = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(lowerCAmelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
lowerCAmelCase__ = k[len(lowerCAmelCase_ ) :]
for old_layer_name in new_layer_name_dict:
lowerCAmelCase__ = new_k.replace(lowerCAmelCase_ , new_layer_name_dict[old_layer_name] )
lowerCAmelCase__ = state_dict.pop(lowerCAmelCase_ )
lowerCAmelCase__ = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCAmelCase__ = {k for k in extra_keys if not k.endswith(".attn.bias" )}
lowerCAmelCase__ = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCAmelCase__ = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(lowerCAmelCase_ ) != 0:
raise ValueError(F'extra keys found: {extra_keys}' )
if len(lowerCAmelCase_ ) != 0:
raise ValueError(F'missing keys: {missing_keys}' )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
lowerCAmelCase__ = model.num_parameters(exclude_embeddings=lowerCAmelCase_ )
lowerCAmelCase__ = checkpoint["best_val_loss"].item()
logger.info(F'model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowerCAmelCase_ , 3 )} loss' )
model.eval()
model.to(lowerCAmelCase_ )
del checkpoint, state_dict
return model
def _A ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str=False , lowerCAmelCase_ : int="text" ):
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCAmelCase__ = "cpu" # do conversion on cpu
lowerCAmelCase__ = _get_ckpt_path(lowerCAmelCase_ , use_small=lowerCAmelCase_ )
lowerCAmelCase__ = _load_model(lowerCAmelCase_ , lowerCAmelCase_ , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
# load bark initial model
lowerCAmelCase__ = _bark_load_model(lowerCAmelCase_ , "cpu" , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
if model_type == "text":
lowerCAmelCase__ = bark_model["model"]
if model.num_parameters(exclude_embeddings=lowerCAmelCase_ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
lowerCAmelCase__ = 5
lowerCAmelCase__ = 10
if model_type in ["text", "coarse"]:
lowerCAmelCase__ = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowerCAmelCase__ = bark_model(lowerCAmelCase_ )[0]
lowerCAmelCase__ = model(lowerCAmelCase_ )
# take last logits
lowerCAmelCase__ = output_new_model_total.logits[:, [-1], :]
else:
lowerCAmelCase__ = 3
lowerCAmelCase__ = 8
lowerCAmelCase__ = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowerCAmelCase__ = model(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = bark_model(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
def _A ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "config.json" ) )
lowerCAmelCase__ = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "config.json" ) )
lowerCAmelCase__ = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "config.json" ) )
lowerCAmelCase__ = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
lowerCAmelCase__ = BarkSemanticModel.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase__ = BarkCoarseModel.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase__ = BarkFineModel.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase__ = EncodecModel.from_pretrained("facebook/encodec_24khz" )
lowerCAmelCase__ = BarkConfig.from_sub_model_configs(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase__ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowerCAmelCase__ = BarkModel(lowerCAmelCase_ )
lowerCAmelCase__ = semantic
lowerCAmelCase__ = coarseAcoustic
lowerCAmelCase__ = fineAcoustic
lowerCAmelCase__ = codec
lowerCAmelCase__ = bark_generation_config
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
bark.save_pretrained(lowerCAmelCase_ , repo_id=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
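
# Minimal usage sketch of the tool above (assumes the transformers tools runtime is
# available; "sample.wav" is a hypothetical local audio file):
#   tool = SpeechToTextTool()
#   transcription = tool("sample.wav")
#   print(transcription)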
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class lowerCamelCase__ :
def __init__( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: Any = False
def lowerCamelCase_ ( self : List[Any] , __a : str , __a : Optional[Any] , __a : Tuple , __a : List[Any] ):
'''simple docstring'''
if not self.initialized:
lowerCamelCase__: List[Any] = RagRetriever(
__a , question_encoder_tokenizer=__a , generator_tokenizer=__a , index=__a , init_retrieval=__a , )
lowerCamelCase__: Tuple = True
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self.retriever.index.init_index()
def lowerCamelCase_ ( self : Union[str, Any] , __a : Dict , __a : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: str = self.retriever._main_retrieve(__a , __a )
return doc_ids, retrieved_doc_embeds
class lowerCamelCase__ ( A__ ):
def __init__( self : Union[str, Any] , __a : Any , __a : int , __a : Optional[int] , __a : Optional[int] , __a : Optional[Any]=None ):
'''simple docstring'''
if index is not None and index.is_initialized() and len(__a ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
__a , question_encoder_tokenizer=__a , generator_tokenizer=__a , index=__a , init_retrieval=__a , )
lowerCamelCase__: Optional[int] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__a , __a , __a , __a )
for worker in self.retrieval_workers
] )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def lowerCamelCase_ ( self : List[str] , __a : Dict , __a : List[str] ):
'''simple docstring'''
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
lowerCamelCase__: str = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
lowerCamelCase__ , lowerCamelCase__: Tuple = ray.get(random_worker.retrieve.remote(__a , __a ) )
else:
lowerCamelCase__ , lowerCamelCase__: Dict = self._main_retrieve(__a , __a )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__a )
@classmethod
def lowerCamelCase_ ( cls : Tuple , __a : Optional[Any] , __a : str=None , **__a : Union[str, Any] ):
'''simple docstring'''
return super(__a , cls ).get_tokenizers(__a , __a , **__a )
@classmethod
def lowerCamelCase_ ( cls : Any , __a : str , __a : int , __a : List[str]=None , **__a : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__: Dict = kwargs.pop("""config""" , __a ) or RagConfig.from_pretrained(__a , **__a )
lowerCamelCase__: List[Any] = RagTokenizer.from_pretrained(__a , config=__a )
lowerCamelCase__: Union[str, Any] = rag_tokenizer.question_encoder
lowerCamelCase__: Tuple = rag_tokenizer.generator
if indexed_dataset is not None:
lowerCamelCase__: List[str] = """custom"""
lowerCamelCase__: str = CustomHFIndex(config.retrieval_vector_size , __a )
else:
lowerCamelCase__: Optional[int] = cls._build_index(__a )
return cls(
__a , question_encoder_tokenizer=__a , generator_tokenizer=__a , retrieval_workers=__a , index=__a , )
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
def __init__( self : str , __a : List[str] , __a : Union[str, Any]=None , __a : List[Any]=None , __a : Tuple=None , __a : Any="resnet50" , __a : List[str]=3 , __a : str=32 , __a : str=3 , __a : Tuple=True , __a : Dict=True , ):
'''simple docstring'''
lowerCamelCase__: int = parent
lowerCamelCase__: Tuple = out_indices if out_indices is not None else [4]
lowerCamelCase__: List[Any] = stage_names
lowerCamelCase__: int = out_features
lowerCamelCase__: Tuple = backbone
lowerCamelCase__: Tuple = batch_size
lowerCamelCase__: List[Any] = image_size
lowerCamelCase__: List[Any] = num_channels
lowerCamelCase__: Any = use_pretrained_backbone
lowerCamelCase__: Union[str, Any] = is_training
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__: List[str] = self.get_config()
return config, pixel_values
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCamelCase_ ( self : Any , __a : Optional[int] , __a : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: List[Any] = TimmBackbone(config=__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase__: int = model(__a )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__: str = config_and_inputs
lowerCamelCase__: Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: List[str] = TimmBackboneModelTester(self )
lowerCamelCase__: Tuple = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: Union[str, Any] = """resnet18"""
lowerCamelCase__: List[str] = """microsoft/resnet-18"""
lowerCamelCase__: Dict = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
lowerCamelCase__: int = AutoBackbone.from_pretrained(__a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCamelCase__: List[Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
lowerCamelCase__: Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Optional[Any] = model_class(__a )
lowerCamelCase__: Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase__: Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__: List[Any] = True
lowerCamelCase__: Any = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCamelCase__: Optional[int] = self.all_model_classes[0]
lowerCamelCase__: Tuple = model_class(__a )
model.to(__a )
lowerCamelCase__: Any = self._prepare_for_class(__a , __a )
lowerCamelCase__: int = model(**__a )
lowerCamelCase__: List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCamelCase__: str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCamelCase__: Tuple = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Dict = model_class(__a )
model.to(__a )
model.eval()
lowerCamelCase__: Dict = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCamelCase__: Optional[Any] = copy.deepcopy(__a )
lowerCamelCase__: str = None
lowerCamelCase__: int = model_class(__a )
model.to(__a )
model.eval()
lowerCamelCase__: Union[str, Any] = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCamelCase__: Optional[int] = copy.deepcopy(__a )
lowerCamelCase__: str = False
lowerCamelCase__: List[Any] = model_class(__a )
model.to(__a )
model.eval()
lowerCamelCase__: Dict = model(**__a )
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
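# A minimal usage sketch for the classes above (illustrative; the lock path
# and the timeout value are assumptions, not part of the original module).
# FileLock is re-entrant: nested acquires only bump the internal counter, and
# the OS-level lock is released when the outermost context exits.
if __name__ == "__main__":
    lock = FileLock("app.lock", timeout=5)
    try:
        with lock:
            with lock:  # nested acquire: increments the counter only
                assert lock.is_locked
    except Timeout:
        print("another process currently holds app.lock")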
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
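# A hedged companion sketch (not part of the original script): reading the
# shards written above with tf.data. The feature spec mirrors the fixed-length
# "input_ids"/"attention_mask" int64 features serialized in
# get_serialized_examples; the glob pattern and max_length are illustrative
# assumptions.
def load_tfrecord_shards(pattern="tf-tpu/train/*.tfrecord", max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    files = tf.data.Dataset.list_files(pattern)
    records = tf.data.TFRecordDataset(files)
    # Each element becomes a dict of dense int64 tensors of shape [max_length].
    return records.map(lambda record: tf.io.parse_single_example(record, feature_spec))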
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Project Euler 57: count the first `n` expansions of the continued
    fraction for sqrt(2) whose numerator has more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"{solution() = }")
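if __name__ == "__main__":
    # Illustrative sanity check, not part of the original solution: the
    # expansions run 3/2, 7/5, 17/12, 41/29, ... and the eighth, 1393/985, is
    # the first whose numerator has more digits than its denominator.
    assert solution(7) == 0
    assert solution(8) == 1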
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql,
        con,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch
        return written
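# Hedged usage sketch for the classes above (the table name and sqlite URI are
# illustrative assumptions): `Dataset.from_sql` and `Dataset.to_sql` are the
# public entry points that delegate to SqlDatasetReader and SqlDatasetWriter.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     ds.to_sql("my_table", "sqlite:///data.db")                      # SqlDatasetWriter
#     round_trip = Dataset.from_sql("my_table", "sqlite:///data.db")  # SqlDatasetReader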
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
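# Hedged sketch (not part of this __init__): downstream code can reuse the
# same guard pattern to fail with a clear message when an optional backend is
# missing. The helper name `require_torch_backend` is an illustrative
# assumption.
#
#     from diffusers.utils import is_torch_available
#
#     def require_torch_backend():
#         if not is_torch_available():
#             raise ImportError("PyTorch is required for torch-based pipelines.")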
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        pass
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
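# Hedged usage sketch mirroring the slow tests above (the pinned revision is
# omitted here; illustrative, not part of the test suite):
#
#     from transformers import pipeline
#
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))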
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
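# Hedged sketch of a concrete command (the name `HelloCommand`, its flag, and
# the subparser wiring are illustrative assumptions): subclasses register
# their own subparser and implement run().
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is expected to be the sub-parsers action of the root CLI parser.
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self.name = name

    def run(self):
        print(f"Hello, {self.name}!")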
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
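# Hedged usage sketch for the pipeline above (the checkpoint id and labels
# follow the public DiT example; the step count is an illustrative choice):
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#     class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#     images = pipe(class_labels=class_ids, num_inference_steps=25).images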
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
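# Hedged usage sketch for the classes exported above (model id and language
# codes follow the public facebook/m2m100_418M checkpoint; illustrative):
#
#     from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
#     model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#     inputs = tokenizer("La vie est belle.", return_tensors="pt")
#     generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("en"))
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))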
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
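# Hedged usage sketch outside the test class (the tiny checkpoint is the one
# exercised above; the generation settings are illustrative assumptions):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#     print(generator("Hello I believe in", max_new_tokens=8, do_sample=False))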