code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : str = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> Union[str, Any]:
_lowerCAmelCase , _lowerCAmelCase : List[Any] = emb.weight.shape
_lowerCAmelCase : Any = nn.Linear(_lowerCamelCase ,_lowerCamelCase ,bias=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Any="facebook/mbart-large-en-ro" ,_lowerCamelCase : List[Any]=False ,_lowerCamelCase : Any=False ) -> int:
_lowerCAmelCase : Dict = torch.load(_lowerCamelCase ,map_location="""cpu""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
_lowerCAmelCase : List[str] = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_lowerCAmelCase : Any = MBartConfig.from_pretrained(_lowerCamelCase ,vocab_size=_lowerCamelCase )
if mbart_aa and finetuned:
_lowerCAmelCase : Any = """relu"""
_lowerCAmelCase : Tuple = state_dict["""decoder.embed_tokens.weight"""]
_lowerCAmelCase : Optional[int] = MBartForConditionalGeneration(_lowerCamelCase )
model.model.load_state_dict(_lowerCamelCase )
if finetuned:
_lowerCAmelCase : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
_a : Optional[Any] = parser.parse_args()
_a : List[str] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 44
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[Any] = torch.exp(_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.sum(_lowerCamelCase ,dim=1 ) # sum of exp(x_i)
_lowerCAmelCase : Dict = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" ,type=_lowerCamelCase ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
parser.add_argument(
"""--num_beams""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) ,)
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--config_name""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Pretrained config name or path if not the same as model_name""" ,)
parser.add_argument(
"""--device""" ,type=_lowerCamelCase ,default="""cpu""" ,help="""Device where the model will be run""" ,)
parser.add_argument("""--output_file_path""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Where to store the final ONNX file.""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : str = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , a__ = None , a__ = None , **a__ , ):
super().__init__(self , **a__ )
_lowerCAmelCase : Any = repo_info
_lowerCAmelCase : Optional[Any] = token
_lowerCAmelCase : Optional[int] = None
def __A ( self ):
if self.dir_cache is None:
_lowerCAmelCase : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCAmelCase : Any = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(a__ ): {"""name""": str(a__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self , a__ , a__ = "rb" , **a__ , ):
if not isinstance(self.repo_info , a__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_lowerCAmelCase : Tuple = hf_hub_url(self.repo_info.id , a__ , revision=self.repo_info.sha )
return fsspec.open(
a__ , mode=a__ , headers=get_authentication_headers_for_url(a__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __A ( self , a__ , **a__ ):
self._get_dirs()
_lowerCAmelCase : Union[str, Any] = self._strip_protocol(a__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(a__ )
def __A ( self , a__ , a__=False , **a__ ):
self._get_dirs()
_lowerCAmelCase : Any = PurePosixPath(path.strip("""/""" ) )
_lowerCAmelCase : List[str] = {}
for p, f in self.dir_cache.items():
_lowerCAmelCase : Any = PurePosixPath(p.strip("""/""" ) )
_lowerCAmelCase : Optional[int] = p.parent
if root == path:
_lowerCAmelCase : Dict = f
_lowerCAmelCase : Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 44
| 1
|
"""simple docstring"""
import os
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
_lowerCAmelCase : Optional[int] = os.path.dirname(os.path.realpath(_lowerCamelCase ) )
_lowerCAmelCase : List[Any] = os.path.join(_lowerCamelCase ,"""triangle.txt""" )
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : Dict = f.readlines()
_lowerCAmelCase : int = []
for line in triangle:
_lowerCAmelCase : Optional[Any] = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(_lowerCamelCase ) )
a.append(_lowerCamelCase )
for i in range(1 ,len(_lowerCamelCase ) ):
for j in range(len(a[i] ) ):
_lowerCAmelCase : Any = a[i - 1][j] if j != len(a[i - 1] ) else 0
_lowerCAmelCase : List[Any] = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(_lowerCamelCase ,_lowerCamelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 44
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
_UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCAmelCase : int = MultilingualCLIP(a__ )
_lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
| 44
| 1
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_a : int = 'pt'
elif is_tf_available():
_a : Union[str, Any] = 'tf'
else:
_a : Union[str, Any] = 'jax'
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = ByTaTokenizer
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Tuple = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __A ( self ):
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def __A ( self , **a__ ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ , a__=False , a__=20 , a__=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_lowerCAmelCase : Optional[int] = []
for i in range(len(a__ ) ):
try:
_lowerCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=a__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowerCAmelCase : Tuple = list(filter(lambda a__ : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , a__ ) )
_lowerCAmelCase : str = list(filter(lambda a__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a__ ) , a__ ) )
if max_length is not None and len(a__ ) > max_length:
_lowerCAmelCase : Optional[Any] = toks[:max_length]
if min_length is not None and len(a__ ) < min_length and len(a__ ) > 0:
while len(a__ ) < min_length:
_lowerCAmelCase : str = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCAmelCase : Any = [t[0] for t in toks]
# Ensure consistency
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
if " " not in output_txt and len(a__ ) > 1:
_lowerCAmelCase : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a__ )
)
if with_prefix_space:
_lowerCAmelCase : int = """ """ + output_txt
_lowerCAmelCase : Dict = tokenizer.encode(a__ , add_special_tokens=a__ )
return output_txt, output_ids
def __A ( self ):
_lowerCAmelCase : Tuple = self.ta_base_tokenizer
_lowerCAmelCase : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_lowerCAmelCase : Dict = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def __A ( self ):
_lowerCAmelCase : str = self.ta_base_tokenizer
_lowerCAmelCase : List[str] = """Unicode €."""
_lowerCAmelCase : Optional[Any] = tokenizer(a__ )
_lowerCAmelCase : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , a__ )
# decoding
_lowerCAmelCase : Optional[int] = tokenizer.decode(a__ )
self.assertEqual(a__ , """Unicode €.</s>""" )
_lowerCAmelCase : Tuple = tokenizer("""e è é ê ë""" )
_lowerCAmelCase : Any = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , a__ )
# decoding
_lowerCAmelCase : Any = tokenizer.decode(a__ )
self.assertEqual(a__ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def __A ( self ):
_lowerCAmelCase : Dict = self.ta_base_tokenizer
_lowerCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_lowerCAmelCase : str = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowerCAmelCase : List[str] = tokenizer(a__ , padding=a__ , return_tensors=a__ )
self.assertIsInstance(a__ , a__ )
if FRAMEWORK != "jax":
_lowerCAmelCase : List[str] = list(batch.input_ids.numpy()[0] )
else:
_lowerCAmelCase : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a__ , a__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __A ( self ):
_lowerCAmelCase : Tuple = self.ta_base_tokenizer
_lowerCAmelCase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowerCAmelCase : List[str] = tokenizer(a__ , padding=a__ , return_tensors=a__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , a__ )
self.assertIn("""attention_mask""" , a__ )
self.assertNotIn("""decoder_input_ids""" , a__ )
self.assertNotIn("""decoder_attention_mask""" , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.ta_base_tokenizer
_lowerCAmelCase : Any = [
"""Summary of the text.""",
"""Another summary.""",
]
_lowerCAmelCase : Any = tokenizer(
text_target=a__ , max_length=32 , padding="""max_length""" , truncation=a__ , return_tensors=a__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def __A ( self ):
_lowerCAmelCase : int = self.ta_base_tokenizer
_lowerCAmelCase : List[str] = ["""A long paragraph for summarization. </s>"""]
_lowerCAmelCase : List[str] = ["""Summary of the text. </s>"""]
# fmt: off
_lowerCAmelCase : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowerCAmelCase : List[str] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowerCAmelCase : Optional[Any] = tokenizer(a__ , text_target=a__ )
self.assertEqual(a__ , batch["""input_ids"""][0] )
self.assertEqual(a__ , batch["""labels"""][0] )
def __A ( self ):
# safety check on max_len default value so we are sure the test works
_lowerCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowerCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = """ He is very happy, UNwant\u00E9d,running"""
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
_lowerCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(a__ )
_lowerCAmelCase : Any = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
shutil.rmtree(a__ )
_lowerCAmelCase : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : List[str] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_lowerCAmelCase : Any = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_lowerCAmelCase : List[str] = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
_lowerCAmelCase : str = tokenizer.__class__.from_pretrained(a__ )
_lowerCAmelCase : List[str] = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowerCAmelCase : Any = tokenizer.__class__.from_pretrained(a__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
with open(os.path.join(a__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_lowerCAmelCase : List[Any] = json.load(a__ )
with open(os.path.join(a__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_lowerCAmelCase : Tuple = json.load(a__ )
_lowerCAmelCase : int = [F"<extra_id_{i}>" for i in range(125 )]
_lowerCAmelCase : List[str] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_lowerCAmelCase : Tuple = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(a__ , a__ )
with open(os.path.join(a__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(a__ , a__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCAmelCase : str = tokenizer_class.from_pretrained(
a__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCAmelCase : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=a__ )]
_lowerCAmelCase : Dict = tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def __A ( self ):
_lowerCAmelCase : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
_lowerCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(a__ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def __A ( self ):
pass
def __A ( self ):
pass
def __A ( self ):
pass
def __A ( self ):
pass
def __A ( self ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
_lowerCAmelCase : str = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_lowerCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : Dict = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(
a__ , skip_special_tokens=a__ )
for attr in attributes_list:
setattr(a__ , attr + """_id""" , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + """_id""" ) , a__ )
setattr(a__ , attr + """_id""" , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + """_id""" ) , a__ )
setattr(a__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(a__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(a__ , """additional_special_tokens_ids""" ) , [] )
setattr(a__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(a__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(a__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 44
|
"""simple docstring"""
from math import ceil
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ) -> int:
_lowerCAmelCase : Dict = list(range(0 ,_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_lowerCAmelCase : Union[str, Any] = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCamelCase )
# Missing blocks
_lowerCAmelCase : int = [i for i in blocks if i not in device_map_blocks]
_lowerCAmelCase : List[Any] = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> str:
_lowerCAmelCase : Optional[Any] = list(range(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = int(ceil(n_layers / len(_lowerCamelCase ) ) )
_lowerCAmelCase : Optional[int] = [layers[i : i + n_blocks] for i in range(0 ,_lowerCamelCase ,_lowerCamelCase )]
return dict(zip(_lowerCamelCase ,_lowerCamelCase ) )
| 44
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[Any] = str(_lowerCamelCase )
while len(_lowerCamelCase ) != 1:
_lowerCAmelCase : int = [int(_lowerCamelCase ) for i in num_string]
_lowerCAmelCase : Dict = 1
for i in range(0 ,len(_lowerCamelCase ) ):
total *= numbers[i]
_lowerCAmelCase : Tuple = str(_lowerCamelCase )
steps += 1
return steps
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Any = str(_lowerCamelCase )
while len(_lowerCamelCase ) != 1:
_lowerCAmelCase : Optional[int] = [int(_lowerCamelCase ) for i in num_string]
_lowerCAmelCase : str = 0
for i in range(0 ,len(_lowerCamelCase ) ):
total += numbers[i]
_lowerCAmelCase : Tuple = str(_lowerCamelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
"""simple docstring"""
_a : List[str] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 44
| 1
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : Any = {'vocab_file': 'spiece.model'}
_a : Dict = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
_a : List[str] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
_a : str = '▁'
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__="</s>" , a__="<unk>" , a__="<pad>" , a__=100 , a__=None , a__ = None , a__=True , **a__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase : List[Any] = [F"<extra_id_{i}>" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCAmelCase : Union[str, Any] = len(set(filter(lambda a__ : bool("""extra_id""" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
_lowerCAmelCase : str = legacy
_lowerCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_lowerCAmelCase : Any = vocab_file
_lowerCAmelCase : Any = extra_ids
_lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def __A ( a__ , a__ , a__ ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_lowerCAmelCase : List[Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , a__ , )
return max_model_length
@property
def __A ( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def __A ( self ):
return list(
set(filter(lambda a__ : bool(re.search(r"""<extra_id_\d+>""" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self ):
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def __A ( self , a__ ):
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Tuple = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_lowerCAmelCase : Optional[Any] = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self ):
_lowerCAmelCase : Tuple = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self , a__ ):
_lowerCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : Any = {}
_lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , a__ , **a__ ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_lowerCAmelCase : Union[str, Any] = SPIECE_UNDERLINE + text.replace(a__ , """ """ )
return super().tokenize(a__ , **a__ )
def __A ( self , a__ , **a__ ):
if not self.legacy:
_lowerCAmelCase : int = text.startswith(a__ )
if is_first:
_lowerCAmelCase : Tuple = text[1:]
_lowerCAmelCase : List[Any] = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(a__ ):
_lowerCAmelCase : Optional[int] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def __A ( self , a__ ):
if token.startswith("""<extra_id_""" ):
_lowerCAmelCase : Any = re.match(r"""<extra_id_(\d+)>""" , a__ )
_lowerCAmelCase : Dict = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def __A ( self , a__ ):
if index < self.sp_model.get_piece_size():
_lowerCAmelCase : Union[str, Any] = self.sp_model.IdToPiece(a__ )
else:
_lowerCAmelCase : Union[str, Any] = F"<extra_id_{self.vocab_size - 1 - index}>"
return token
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : List[str] = """"""
_lowerCAmelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Union[str, Any] = []
else:
current_sub_tokens.append(a__ )
_lowerCAmelCase : Tuple = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Optional[Any] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , """wb""" ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
| 44
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
| 44
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : List[str] ) -> int:
_lowerCAmelCase : str = original_name.split(""".""" )[0]
_lowerCAmelCase : Optional[int] = key.split(""".""" )
_lowerCAmelCase : Any = int(key_list[key_list.index(_lowerCamelCase ) - 2] )
_lowerCAmelCase : Tuple = int(key_list[key_list.index(_lowerCamelCase ) - 1] )
_lowerCAmelCase : Optional[int] = orig_block_num - offset
_lowerCAmelCase : Tuple = key.replace(f"{orig_block_num}.{layer_num}.{original_name}" ,f"block.{new_block_num}.{layer_num}.{new_name}" )
return key
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
_lowerCAmelCase : Optional[Any] = OrderedDict()
_lowerCAmelCase , _lowerCAmelCase : int = 0, 0
for key, value in state_dict.items():
if key.startswith("""network""" ):
_lowerCAmelCase : Union[str, Any] = key.replace("""network""" ,"""poolformer.encoder""" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""" ) and "patch_embed" not in key:
patch_emb_offset += 1
_lowerCAmelCase : Optional[Any] = key[: key.find("""proj""" )]
_lowerCAmelCase : Optional[int] = key.replace(_lowerCamelCase ,f"patch_embeddings.{total_embed_found}." )
_lowerCAmelCase : Tuple = key.replace("""proj""" ,"""projection""" )
if key.endswith("""bias""" ):
total_embed_found += 1
if "patch_embeddings" in key:
_lowerCAmelCase : Optional[Any] = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
_lowerCAmelCase : Optional[int] = replace_key_with_offset(_lowerCamelCase ,_lowerCamelCase ,"""mlp.fc1""" ,"""output.conv1""" )
if "mlp.fc2" in key:
_lowerCAmelCase : List[str] = replace_key_with_offset(_lowerCamelCase ,_lowerCamelCase ,"""mlp.fc2""" ,"""output.conv2""" )
if "norm1" in key:
_lowerCAmelCase : List[Any] = replace_key_with_offset(_lowerCamelCase ,_lowerCamelCase ,"""norm1""" ,"""before_norm""" )
if "norm2" in key:
_lowerCAmelCase : List[str] = replace_key_with_offset(_lowerCamelCase ,_lowerCamelCase ,"""norm2""" ,"""after_norm""" )
if "layer_scale_1" in key:
_lowerCAmelCase : Dict = replace_key_with_offset(_lowerCamelCase ,_lowerCamelCase ,"""layer_scale_1""" ,"""layer_scale_1""" )
if "layer_scale_2" in key:
_lowerCAmelCase : Tuple = replace_key_with_offset(_lowerCamelCase ,_lowerCamelCase ,"""layer_scale_2""" ,"""layer_scale_2""" )
if "head" in key:
_lowerCAmelCase : Union[str, Any] = key.replace("""head""" ,"""classifier""" )
_lowerCAmelCase : str = value
return new_state_dict
def SCREAMING_SNAKE_CASE ( ) -> Dict:
_lowerCAmelCase : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase : List[str] = Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any ,_lowerCamelCase : str ) -> Dict:
_lowerCAmelCase : int = PoolFormerConfig()
# set attributes based on model_name
_lowerCAmelCase : int = """huggingface/label-files"""
_lowerCAmelCase : Tuple = model_name[-3:]
_lowerCAmelCase : str = 1000
_lowerCAmelCase : Tuple = """imagenet-1k-id2label.json"""
_lowerCAmelCase : Dict = (1, 1000)
# set config attributes
_lowerCAmelCase : List[str] = json.load(open(hf_hub_download(_lowerCamelCase ,_lowerCamelCase ,repo_type="""dataset""" ) ,"""r""" ) )
_lowerCAmelCase : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Optional[int] = idalabel
_lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
if size == "s12":
_lowerCAmelCase : List[str] = [2, 2, 6, 2]
_lowerCAmelCase : int = [64, 128, 320, 512]
_lowerCAmelCase : int = 4.0
_lowerCAmelCase : List[Any] = 0.9
elif size == "s24":
_lowerCAmelCase : Tuple = [4, 4, 12, 4]
_lowerCAmelCase : Any = [64, 128, 320, 512]
_lowerCAmelCase : int = 4.0
_lowerCAmelCase : List[str] = 0.9
elif size == "s36":
_lowerCAmelCase : List[str] = [6, 6, 18, 6]
_lowerCAmelCase : Optional[Any] = [64, 128, 320, 512]
_lowerCAmelCase : Any = 4.0
_lowerCAmelCase : int = 1e-6
_lowerCAmelCase : str = 0.9
elif size == "m36":
_lowerCAmelCase : str = [6, 6, 18, 6]
_lowerCAmelCase : Any = [96, 192, 384, 768]
_lowerCAmelCase : Tuple = 4.0
_lowerCAmelCase : List[Any] = 1e-6
_lowerCAmelCase : List[str] = 0.95
elif size == "m48":
_lowerCAmelCase : Union[str, Any] = [8, 8, 24, 8]
_lowerCAmelCase : int = [96, 192, 384, 768]
_lowerCAmelCase : str = 4.0
_lowerCAmelCase : Any = 1e-6
_lowerCAmelCase : str = 0.95
else:
raise ValueError(f"Size {size} not supported" )
# load image processor
_lowerCAmelCase : List[str] = PoolFormerImageProcessor(crop_pct=_lowerCamelCase )
# Prepare image
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : str = image_processor(images=_lowerCamelCase ,return_tensors="""pt""" ).pixel_values
logger.info(f"Converting model {model_name}..." )
# load original state dict
_lowerCAmelCase : List[Any] = torch.load(_lowerCamelCase ,map_location=torch.device("""cpu""" ) )
# rename keys
_lowerCAmelCase : str = rename_keys(_lowerCamelCase )
# create HuggingFace model and load state dict
_lowerCAmelCase : Optional[int] = PoolFormerForImageClassification(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# Define image processor
_lowerCAmelCase : str = PoolFormerImageProcessor(crop_pct=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = image_processor(images=prepare_img() ,return_tensors="""pt""" ).pixel_values
# forward pass
_lowerCAmelCase : Union[str, Any] = model(_lowerCamelCase )
_lowerCAmelCase : Dict = outputs.logits
# define expected logit slices for different models
if size == "s12":
_lowerCAmelCase : str = torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
elif size == "s24":
_lowerCAmelCase : Optional[Any] = torch.tensor([0.44_02, -0.13_74, -0.80_45] )
elif size == "s36":
_lowerCAmelCase : Union[str, Any] = torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
elif size == "m36":
_lowerCAmelCase : str = torch.tensor([0.39_52, 0.22_63, -1.26_68] )
elif size == "m48":
_lowerCAmelCase : Optional[int] = torch.tensor([0.11_67, -0.06_56, -0.34_23] )
else:
raise ValueError(f"Size {size} not supported" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,_lowerCamelCase ,atol=1e-2 )
# finally, save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_a : Tuple = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 44
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
| 44
| 1
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_a : str = get_logger(__name__)
_a : Tuple = Path(__file__).parent / 'model_card_template.md'
_a : Tuple = uuida().hex
_a : Dict = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_a : int = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_a : Any = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[Dict, str, None] = None ) -> str:
_lowerCAmelCase : Any = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_flax_available():
ua += f"; jax/{_jax_version}"
ua += f"; flax/{_flax_version}"
if is_onnx_available():
ua += f"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" ,"""""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() )
elif isinstance(_lowerCamelCase ,_lowerCamelCase ):
ua += "; " + user_agent
return ua
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Optional[str] = None ,_lowerCamelCase : Optional[str] = None ) -> Any:
if token is None:
_lowerCAmelCase : str = HfFolder.get_token()
if organization is None:
_lowerCAmelCase : Optional[Any] = whoami(_lowerCamelCase )["""name"""]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : List[Any] ) -> int:
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""" )
if hasattr(_lowerCamelCase ,"""local_rank""" ) and args.local_rank not in [-1, 0]:
return
_lowerCAmelCase : Optional[int] = args.hub_token if hasattr(_lowerCamelCase ,"""hub_token""" ) else None
_lowerCAmelCase : int = get_full_repo_name(_lowerCamelCase ,token=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="""en""" ,license="""apache-2.0""" ,library_name="""diffusers""" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=_lowerCamelCase ,model_name=_lowerCamelCase ,repo_name=_lowerCamelCase ,dataset_name=args.dataset_name if hasattr(_lowerCamelCase ,"""dataset_name""" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_lowerCamelCase ,"""gradient_accumulation_steps""" ) else None
) ,adam_betaa=args.adam_betaa if hasattr(_lowerCamelCase ,"""adam_beta1""" ) else None ,adam_betaa=args.adam_betaa if hasattr(_lowerCamelCase ,"""adam_beta2""" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(_lowerCamelCase ,"""adam_weight_decay""" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(_lowerCamelCase ,"""adam_epsilon""" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(_lowerCamelCase ,"""lr_scheduler""" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(_lowerCamelCase ,"""lr_warmup_steps""" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(_lowerCamelCase ,"""ema_inv_gamma""" ) else None ,ema_power=args.ema_power if hasattr(_lowerCamelCase ,"""ema_power""" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(_lowerCamelCase ,"""ema_max_decay""" ) else None ,mixed_precision=args.mixed_precision ,)
_lowerCAmelCase : Optional[int] = os.path.join(args.output_dir ,"""README.md""" )
model_card.save(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[str] ,_lowerCamelCase : Optional[str] = None ) -> Optional[int]:
if resolved_file is None or commit_hash is not None:
return commit_hash
_lowerCAmelCase : List[str] = str(Path(_lowerCamelCase ).as_posix() )
_lowerCAmelCase : List[Any] = re.search(r"""snapshots/([^/]+)/""" ,_lowerCamelCase )
if search is None:
return None
_lowerCAmelCase : List[Any] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(_lowerCamelCase ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_a : int = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
_a : Union[str, Any] = os.path.join(hf_cache_home, 'diffusers')
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[str] = None ,_lowerCamelCase : Optional[str] = None ) -> None:
if new_cache_dir is None:
_lowerCAmelCase : List[str] = DIFFUSERS_CACHE
if old_cache_dir is None:
_lowerCAmelCase : str = old_diffusers_cache
_lowerCAmelCase : List[str] = Path(_lowerCamelCase ).expanduser()
_lowerCAmelCase : List[str] = Path(_lowerCamelCase ).expanduser()
for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
_lowerCAmelCase : Optional[int] = new_cache_dir / old_blob_path.relative_to(_lowerCamelCase )
new_blob_path.parent.mkdir(parents=_lowerCamelCase ,exist_ok=_lowerCamelCase )
os.replace(_lowerCamelCase ,_lowerCamelCase )
try:
os.symlink(_lowerCamelCase ,_lowerCamelCase )
except OSError:
logger.warning(
"""Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_a : Any = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
_a : Dict = 0
else:
with open(cache_version_file) as f:
try:
_a : Optional[int] = int(f.read())
except ValueError:
_a : int = 0
if cache_version < 1:
_a : Any = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
_a : Optional[int] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Optional[str] = None ) -> str:
if variant is not None:
_lowerCAmelCase : int = weights_name.split(""".""" )
_lowerCAmelCase : str = splits[:-1] + [variant] + splits[-1:]
_lowerCAmelCase : Union[str, Any] = """.""".join(_lowerCamelCase )
return weights_name
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,*,
_lowerCamelCase : Tuple ,_lowerCamelCase : str ,_lowerCamelCase : Tuple ,_lowerCamelCase : List[str] ,_lowerCamelCase : Dict ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ,_lowerCamelCase : Any ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict=None ,) -> Any:
_lowerCAmelCase : Tuple = str(_lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(_lowerCamelCase ):
if os.path.isfile(os.path.join(_lowerCamelCase ,_lowerCamelCase ) ):
# Load from a PyTorch checkpoint
_lowerCAmelCase : List[str] = os.path.join(_lowerCamelCase ,_lowerCamelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) ):
_lowerCAmelCase : Union[str, Any] = os.path.join(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
return model_file
else:
raise EnvironmentError(
f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse("""0.20.0""" )
):
try:
_lowerCAmelCase : Tuple = hf_hub_download(
_lowerCamelCase ,filename=_add_variant(_lowerCamelCase ,_lowerCamelCase ) ,cache_dir=_lowerCamelCase ,force_download=_lowerCamelCase ,proxies=_lowerCamelCase ,resume_download=_lowerCamelCase ,local_files_only=_lowerCamelCase ,use_auth_token=_lowerCamelCase ,user_agent=_lowerCamelCase ,subfolder=_lowerCamelCase ,revision=revision or commit_hash ,)
warnings.warn(
f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." ,_lowerCamelCase ,)
return model_file
except: # noqa: E722
warnings.warn(
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_lowerCamelCase ,_lowerCamelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(_lowerCamelCase ,_lowerCamelCase )}' so that the correct variant file can be added." ,_lowerCamelCase ,)
try:
# 2. Load model file as usual
_lowerCAmelCase : Tuple = hf_hub_download(
_lowerCamelCase ,filename=_lowerCamelCase ,cache_dir=_lowerCamelCase ,force_download=_lowerCamelCase ,proxies=_lowerCamelCase ,resume_download=_lowerCamelCase ,local_files_only=_lowerCamelCase ,use_auth_token=_lowerCamelCase ,user_agent=_lowerCamelCase ,subfolder=_lowerCamelCase ,revision=revision or commit_hash ,)
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"""this model name. Check the model page at """
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 44
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" ,type=_lowerCamelCase ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
parser.add_argument(
"""--num_beams""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) ,)
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--config_name""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Pretrained config name or path if not the same as model_name""" ,)
parser.add_argument(
"""--device""" ,type=_lowerCamelCase ,default="""cpu""" ,help="""Device where the model will be run""" ,)
parser.add_argument("""--output_file_path""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Where to store the final ONNX file.""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
| 44
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]: # noqa: E741
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : str = [False] * n
_lowerCAmelCase : str = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Union[str, Any] = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : int = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Tuple = True
else:
_lowerCAmelCase : Union[str, Any] = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : List[str] = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
# Adjacency list of graph
_a : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 44
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]: # noqa: E741
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : str = [False] * n
_lowerCAmelCase : str = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Union[str, Any] = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : int = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Tuple = True
else:
_lowerCAmelCase : Union[str, Any] = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : List[str] = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
# Adjacency list of graph
_a : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 44
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = XGLMTokenizer
_UpperCamelCase : List[Any] = XGLMTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
_lowerCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=a__ )
_lowerCAmelCase : List[str] = pickle.dumps(a__ )
pickle.loads(a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = """Hello World!"""
_lowerCAmelCase : Optional[int] = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/xglm-564M""" , padding=a__ , )
| 44
| 1
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=_lowerCamelCase ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=_lowerCamelCase ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=_lowerCamelCase )
return parser.parse_args()
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_lowerCAmelCase : List[Any] = parse_args()
# Import training_script as a module.
_lowerCAmelCase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCAmelCase : Union[str, Any] = script_fpath.stem
_lowerCAmelCase : Optional[Any] = importlib.import_module(_lowerCamelCase )
# Patch sys.argv
_lowerCAmelCase : Tuple = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 44
|
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ) -> List[str]:
_lowerCAmelCase : Tuple = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[int] = height - k_size + 1
_lowerCAmelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(_lowerCamelCase ) ,range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : List[Any] = gen_gaussian_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = ravel(_lowerCamelCase )
# reshape and get the dst image
_lowerCAmelCase : int = dot(_lowerCamelCase ,_lowerCamelCase ).reshape(_lowerCamelCase ,_lowerCamelCase ).astype(_lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_a : Optional[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
_a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_a : Union[str, Any] = gaussian_filter(gray, 3, sigma=1)
_a : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
| 44
| 1
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_a : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
_a : Optional[Any] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
_a : Any = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ElectraTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ):
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , )
_lowerCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , a__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , a__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , a__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(a__ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : int = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : Dict = tokenize_chinese_chars
_lowerCAmelCase : str = normalizer_class(**a__ )
_lowerCAmelCase : List[str] = do_lower_case
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
| 44
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Tuple = '▁'
_a : int = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
_a : Any = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
_a : List[Any] = {'vinai/bartpho-syllable': 1_024}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : List[str] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
_lowerCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : str = monolingual_vocab_file
_lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : Tuple = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(a__ ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase : Dict = cnt
cnt += 1
with open(a__ , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase : str = line.strip().split()[0]
_lowerCAmelCase : Any = len(self.fairseq_tokens_to_ids )
if str(a__ ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase : str = len(self.fairseq_tokens_to_ids )
_lowerCAmelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
_lowerCAmelCase : Dict = self.__dict__.copy()
_lowerCAmelCase : str = None
_lowerCAmelCase : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a__ ):
_lowerCAmelCase : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : int = {}
_lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Optional[int] = [self.cls_token_id]
_lowerCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self ):
return len(self.fairseq_ids_to_tokens )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , a__ ):
return self.sp_model.encode(a__ , out_type=a__ )
def __A ( self , a__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __A ( self , a__ ):
return self.fairseq_ids_to_tokens[index]
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = """""".join(a__ ).replace(a__ , """ """ ).strip()
return out_string
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Optional[int] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : Optional[Any] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , """wb""" ) as fi:
_lowerCAmelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(a__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
a__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , a__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(a__ , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"{str(a__ )} \n" )
return out_vocab_file, out_monolingual_vocab_file
| 44
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_a : str = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_a : List[str] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_a : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def __A ( self , a__ , a__ , a__=False ):
if return_pvalue:
_lowerCAmelCase : List[Any] = pearsonr(a__ , a__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(a__ , a__ )[0] )}
| 44
| 1
|
"""simple docstring"""
import numpy as np
class __A :
def __init__( self ):
_lowerCAmelCase : Optional[Any] = (0, 0)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = 0
def __eq__( self , a__ ):
return self.position == cell.position
def __A ( self ):
print(self.position )
class __A :
def __init__( self , a__=(5, 5) ):
_lowerCAmelCase : List[Any] = np.zeros(a__ )
_lowerCAmelCase : List[str] = world_size[0]
_lowerCAmelCase : List[str] = world_size[1]
def __A ( self ):
print(self.w )
def __A ( self , a__ ):
_lowerCAmelCase : Dict = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
_lowerCAmelCase : Optional[Any] = cell.position[0]
_lowerCAmelCase : str = cell.position[1]
_lowerCAmelCase : Any = []
for n in neughbour_cord:
_lowerCAmelCase : List[str] = current_x + n[0]
_lowerCAmelCase : List[str] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
_lowerCAmelCase : Optional[int] = Cell()
_lowerCAmelCase : List[str] = (x, y)
_lowerCAmelCase : Optional[int] = cell
neighbours.append(a__ )
return neighbours
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Any = []
_open.append(_lowerCamelCase )
while _open:
_lowerCAmelCase : List[str] = np.argmin([n.f for n in _open] )
_lowerCAmelCase : int = _open[min_f]
_closed.append(_open.pop(_lowerCamelCase ) )
if current == goal:
break
for n in world.get_neigbours(_lowerCamelCase ):
for c in _closed:
if c == n:
continue
_lowerCAmelCase : Any = current.g + 1
_lowerCAmelCase , _lowerCAmelCase : Tuple = n.position
_lowerCAmelCase , _lowerCAmelCase : Dict = goal.position
_lowerCAmelCase : Union[str, Any] = (ya - ya) ** 2 + (xa - xa) ** 2
_lowerCAmelCase : List[Any] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_lowerCamelCase )
_lowerCAmelCase : Dict = []
while current.parent is not None:
path.append(current.position )
_lowerCAmelCase : Union[str, Any] = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
_a : Any = Gridworld()
# Start position and goal
_a : Optional[int] = Cell()
_a : Dict = (0, 0)
_a : Dict = Cell()
_a : Union[str, Any] = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
_a : List[str] = astar(world, start, goal)
# Just for visual reasons.
for i in s:
_a : Optional[Any] = 1
print(world.w)
| 44
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 50 ) -> int:
_lowerCAmelCase : int = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44
| 1
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
_a : Tuple = logging.getLogger(__name__)
@dataclass
class __A :
_UpperCamelCase : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
_UpperCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_UpperCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_UpperCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class __A :
_UpperCamelCase : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCamelCase : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
_UpperCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Train language if it is different from the evaluation language."} )
_UpperCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_UpperCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_UpperCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowerCAmelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_xnli""" ,_lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase : int = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
datasets.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_lowerCAmelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_lowerCAmelCase : Any = load_dataset(
"""xnli""" ,model_args.language ,split="""train""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
_lowerCAmelCase : Dict = load_dataset(
"""xnli""" ,model_args.train_language ,split="""train""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
_lowerCAmelCase : Tuple = train_dataset.features["""label"""].names
if training_args.do_eval:
_lowerCAmelCase : List[str] = load_dataset(
"""xnli""" ,model_args.language ,split="""validation""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
_lowerCAmelCase : List[Any] = eval_dataset.features["""label"""].names
if training_args.do_predict:
_lowerCAmelCase : Union[str, Any] = load_dataset(
"""xnli""" ,model_args.language ,split="""test""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
_lowerCAmelCase : Union[str, Any] = predict_dataset.features["""label"""].names
# Labels
_lowerCAmelCase : Optional[Any] = len(_lowerCamelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=_lowerCamelCase ,idalabel={str(_lowerCamelCase ): label for i, label in enumerate(_lowerCamelCase )} ,labelaid={label: i for i, label in enumerate(_lowerCamelCase )} ,finetuning_task="""xnli""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
_lowerCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=_lowerCamelCase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_lowerCAmelCase : Optional[int] = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowerCAmelCase : List[str] = False
def preprocess_function(_lowerCamelCase : Dict ):
# Tokenize the texts
return tokenizer(
examples["""premise"""] ,examples["""hypothesis"""] ,padding=_lowerCamelCase ,max_length=data_args.max_seq_length ,truncation=_lowerCamelCase ,)
if training_args.do_train:
if data_args.max_train_samples is not None:
_lowerCAmelCase : Any = min(len(_lowerCamelCase ) ,data_args.max_train_samples )
_lowerCAmelCase : str = train_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
_lowerCAmelCase : Optional[int] = train_dataset.map(
_lowerCamelCase ,batched=_lowerCamelCase ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on train dataset""" ,)
# Log a few random samples from the training set:
for index in random.sample(range(len(_lowerCamelCase ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_lowerCAmelCase : Optional[Any] = min(len(_lowerCamelCase ) ,data_args.max_eval_samples )
_lowerCAmelCase : str = eval_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
_lowerCAmelCase : List[str] = eval_dataset.map(
_lowerCamelCase ,batched=_lowerCamelCase ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on validation dataset""" ,)
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_lowerCAmelCase : Optional[Any] = min(len(_lowerCamelCase ) ,data_args.max_predict_samples )
_lowerCAmelCase : int = predict_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
_lowerCAmelCase : Tuple = predict_dataset.map(
_lowerCamelCase ,batched=_lowerCamelCase ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on prediction dataset""" ,)
# Get the metric function
_lowerCAmelCase : Optional[Any] = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowerCamelCase : EvalPrediction ):
_lowerCAmelCase : str = p.predictions[0] if isinstance(p.predictions ,_lowerCamelCase ) else p.predictions
_lowerCAmelCase : str = np.argmax(_lowerCamelCase ,axis=1 )
return metric.compute(predictions=_lowerCamelCase ,references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowerCAmelCase : List[str] = default_data_collator
elif training_args.fpaa:
_lowerCAmelCase : List[str] = DataCollatorWithPadding(_lowerCamelCase ,pad_to_multiple_of=8 )
else:
_lowerCAmelCase : List[Any] = None
# Initialize our Trainer
_lowerCAmelCase : Union[str, Any] = Trainer(
model=_lowerCamelCase ,args=_lowerCamelCase ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=_lowerCamelCase ,tokenizer=_lowerCamelCase ,data_collator=_lowerCamelCase ,)
# Training
if training_args.do_train:
_lowerCAmelCase : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase : int = last_checkpoint
_lowerCAmelCase : List[str] = trainer.train(resume_from_checkpoint=_lowerCamelCase )
_lowerCAmelCase : Dict = train_result.metrics
_lowerCAmelCase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCamelCase )
)
_lowerCAmelCase : List[str] = min(_lowerCamelCase ,len(_lowerCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" ,_lowerCamelCase )
trainer.save_metrics("""train""" ,_lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase : Tuple = trainer.evaluate(eval_dataset=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCamelCase )
_lowerCAmelCase : Any = min(_lowerCamelCase ,len(_lowerCamelCase ) )
trainer.log_metrics("""eval""" ,_lowerCamelCase )
trainer.save_metrics("""eval""" ,_lowerCamelCase )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = trainer.predict(_lowerCamelCase ,metric_key_prefix="""predict""" )
_lowerCAmelCase : Optional[int] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_lowerCamelCase )
)
_lowerCAmelCase : Dict = min(_lowerCamelCase ,len(_lowerCamelCase ) )
trainer.log_metrics("""predict""" ,_lowerCamelCase )
trainer.save_metrics("""predict""" ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = np.argmax(_lowerCamelCase ,axis=1 )
_lowerCAmelCase : Optional[Any] = os.path.join(training_args.output_dir ,"""predictions.txt""" )
if trainer.is_world_process_zero():
with open(_lowerCamelCase ,"""w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Tuple = label_list[item]
writer.write(f"{index}\t{item}\n" )
if __name__ == "__main__":
main()
| 44
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
_UpperCamelCase : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_UpperCamelCase : Optional[int] = "document_qa"
_UpperCamelCase : Any = AutoProcessor
_UpperCamelCase : Union[str, Any] = VisionEncoderDecoderModel
_UpperCamelCase : Union[str, Any] = ["image", "text"]
_UpperCamelCase : List[str] = ["text"]
def __init__( self , *a__ , **a__ ):
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*a__ , **a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[int] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_lowerCAmelCase : Dict = task_prompt.replace("""{user_input}""" , a__ )
_lowerCAmelCase : str = self.pre_processor.tokenizer(
a__ , add_special_tokens=a__ , return_tensors="""pt""" ).input_ids
_lowerCAmelCase : Dict = self.pre_processor(a__ , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __A ( self , a__ ):
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a__ , ).sequences
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = self.pre_processor.batch_decode(a__ )[0]
_lowerCAmelCase : int = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
_lowerCAmelCase : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
_lowerCAmelCase : List[str] = re.sub(r"""<.*?>""" , """""" , a__ , count=1 ).strip() # remove first task start token
_lowerCAmelCase : List[str] = self.pre_processor.tokenajson(a__ )
return sequence["answer"]
| 44
| 1
|
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __A ( SCREAMING_SNAKE_CASE_ ):
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : List[Any] = 8
# DPR tok
_lowerCAmelCase : str = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(a__ , exist_ok=a__ )
_lowerCAmelCase : int = os.path.join(a__ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_lowerCAmelCase : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowerCAmelCase : str = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : List[str] = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(a__ , exist_ok=a__ )
_lowerCAmelCase : Dict = os.path.join(a__ , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : Tuple = os.path.join(a__ , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def __A ( self ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def __A ( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_dummy_dataset()
_lowerCAmelCase : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
_lowerCAmelCase : List[Any] = dataset
_lowerCAmelCase : Optional[Any] = RagRetriever(
a__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __A ( self , a__ ):
_lowerCAmelCase : Optional[Any] = self.get_dummy_dataset()
_lowerCAmelCase : Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
if from_disk:
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , """dataset""" )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """index.faiss""" )
dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
dataset.drop_index("""embeddings""" )
dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
del dataset
_lowerCAmelCase : Optional[int] = RagRetriever(
a__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_lowerCAmelCase : int = RagRetriever(
a__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , a__ ) , )
return retriever
def __A ( self ):
_lowerCAmelCase : Optional[Any] = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
_lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
_lowerCAmelCase : Union[str, Any] = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
pickle.dump(a__ , open(a__ , """wb""" ) )
_lowerCAmelCase : List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
_lowerCAmelCase : str = RagRetriever(
a__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __A ( self ):
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : Dict = self.get_dummy_canonical_hf_index_retriever()
_lowerCAmelCase : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = retriever.retrieve(a__ , n_docs=a__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , a__ )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
_lowerCAmelCase : Optional[int] = self.get_dummy_dataset()
retriever.save_pretrained(a__ )
_lowerCAmelCase : Dict = RagRetriever.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
_lowerCAmelCase : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase : int = retriever.retrieve(a__ , n_docs=1 )
self.assertTrue(out is not None )
def __A ( self ):
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
_lowerCAmelCase : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = retriever.retrieve(a__ , n_docs=a__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , a__ )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __A ( self ):
_lowerCAmelCase : Any = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(a__ )
_lowerCAmelCase : Tuple = RagRetriever.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
_lowerCAmelCase : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase : Any = retriever.retrieve(a__ , n_docs=1 )
self.assertTrue(out is not None )
def __A ( self ):
_lowerCAmelCase : Any = 1
_lowerCAmelCase : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
_lowerCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = retriever.retrieve(a__ , n_docs=a__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , a__ )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(a__ )
_lowerCAmelCase : List[Any] = RagRetriever.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
_lowerCAmelCase : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase : List[Any] = retriever.retrieve(a__ , n_docs=1 )
self.assertTrue(out is not None )
def __A ( self ):
_lowerCAmelCase : int = 1
_lowerCAmelCase : Tuple = self.get_dummy_legacy_index_retriever()
_lowerCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = retriever.retrieve(a__ , n_docs=a__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""text"""] ) , a__ )
self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __A ( self ):
_lowerCAmelCase : str = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(a__ )
_lowerCAmelCase : Tuple = RagRetriever.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
_lowerCAmelCase : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase : List[str] = retriever.retrieve(a__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __A ( self ):
import torch
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : int = self.get_dummy_canonical_hf_index_retriever()
_lowerCAmelCase : Optional[Any] = [[5, 7], [10, 11]]
_lowerCAmelCase : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase : List[str] = retriever(a__ , a__ , prefix=retriever.config.generator.prefix , n_docs=a__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = (
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(a__ , a__ )
self.assertIsInstance(a__ , a__ )
self.assertIsInstance(a__ , np.ndarray )
_lowerCAmelCase : int = retriever(
a__ , a__ , prefix=retriever.config.generator.prefix , n_docs=a__ , return_tensors="""pt""" , )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = ( # noqa: F841
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
out["""doc_ids"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(a__ , torch.Tensor )
self.assertIsInstance(a__ , torch.Tensor )
self.assertIsInstance(a__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : int = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
retriever.set_ctx_encoder_tokenizer(a__ )
_lowerCAmelCase : Any = [[5, 7], [10, 11]]
_lowerCAmelCase : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_lowerCAmelCase : List[Any] = retriever(a__ , a__ , prefix=retriever.config.generator.prefix , n_docs=a__ )
self.assertEqual(
len(a__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , a__ ) # check for doc token related keys in dictionary.
| 44
|
"""simple docstring"""
from __future__ import annotations
_a : List[str] = 10
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = max(_lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase : list[list] = [[] for _ in range(_lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(_lowerCamelCase )
# put each buckets' contents into list_of_ints
_lowerCAmelCase : List[str] = 0
for b in range(_lowerCamelCase ):
for i in buckets[b]:
_lowerCAmelCase : Any = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __A :
def __init__( self , a__ = None ):
if components is None:
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : str = list(a__ )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(a__ , self.__components ) ) + ")"
def __add__( self , a__ ):
_lowerCAmelCase : List[str] = len(self )
if size == len(a__ ):
_lowerCAmelCase : str = [self.__components[i] + other.component(a__ ) for i in range(a__ )]
return Vector(a__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , a__ ):
_lowerCAmelCase : int = len(self )
if size == len(a__ ):
_lowerCAmelCase : Optional[Any] = [self.__components[i] - other.component(a__ ) for i in range(a__ )]
return Vector(a__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , a__ ):
...
@overload
def __mul__( self , a__ ):
...
def __mul__( self , a__ ):
if isinstance(a__ , (float, int) ):
_lowerCAmelCase : List[Any] = [c * other for c in self.__components]
return Vector(a__ )
elif isinstance(a__ , a__ ) and len(self ) == len(a__ ):
_lowerCAmelCase : List[Any] = len(self )
_lowerCAmelCase : List[str] = [self.__components[i] * other.component(a__ ) for i in range(a__ )]
return sum(a__ )
else: # error case
raise Exception("""invalid operand!""" )
def __A ( self ):
return Vector(self.__components )
def __A ( self , a__ ):
if isinstance(a__ , a__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __A ( self , a__ , a__ ):
assert -len(self.__components ) <= pos < len(self.__components )
_lowerCAmelCase : int = value
def __A ( self ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
_lowerCAmelCase : int = [c**2 for c in self.__components]
return math.sqrt(sum(a__ ) )
def __A ( self , a__ , a__ = False ):
_lowerCAmelCase : List[Any] = self * other
_lowerCAmelCase : int = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Vector:
assert isinstance(_lowerCamelCase ,_lowerCamelCase )
return Vector([0] * dimension )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ) -> Vector:
assert isinstance(_lowerCamelCase ,_lowerCamelCase ) and (isinstance(_lowerCamelCase ,_lowerCamelCase ))
_lowerCAmelCase : Union[str, Any] = [0] * dimension
_lowerCAmelCase : List[Any] = 1
return Vector(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : Vector ,_lowerCamelCase : Vector ) -> Vector:
assert (
isinstance(_lowerCamelCase ,_lowerCamelCase )
and isinstance(_lowerCamelCase ,_lowerCamelCase )
and (isinstance(_lowerCamelCase ,(int, float) ))
)
return x * scalar + y
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Vector:
random.seed(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = [random.randint(_lowerCamelCase ,_lowerCamelCase ) for _ in range(_lowerCamelCase )]
return Vector(_lowerCamelCase )
class __A :
def __init__( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = matrix
_lowerCAmelCase : Dict = w
_lowerCAmelCase : Optional[int] = h
def __str__( self ):
_lowerCAmelCase : Tuple = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , a__ ):
if self.__width == other.width() and self.__height == other.height():
_lowerCAmelCase : str = []
for i in range(self.__height ):
_lowerCAmelCase : List[Any] = [
self.__matrix[i][j] + other.component(a__ , a__ )
for j in range(self.__width )
]
matrix.append(a__ )
return Matrix(a__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , a__ ):
if self.__width == other.width() and self.__height == other.height():
_lowerCAmelCase : Any = []
for i in range(self.__height ):
_lowerCAmelCase : Any = [
self.__matrix[i][j] - other.component(a__ , a__ )
for j in range(self.__width )
]
matrix.append(a__ )
return Matrix(a__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , a__ ):
...
@overload
def __mul__( self , a__ ):
...
def __mul__( self , a__ ):
if isinstance(a__ , a__ ): # matrix-vector
if len(a__ ) == self.__width:
_lowerCAmelCase : int = zero_vector(self.__height )
for i in range(self.__height ):
_lowerCAmelCase : Dict = [
self.__matrix[i][j] * other.component(a__ )
for j in range(self.__width )
]
ans.change_component(a__ , sum(a__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(a__ , (int, float) ): # matrix-scalar
_lowerCAmelCase : Union[str, Any] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(a__ , self.__width , self.__height )
return None
def __A ( self ):
return self.__height
def __A ( self ):
return self.__width
def __A ( self , a__ , a__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def __A ( self , a__ , a__ , a__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
_lowerCAmelCase : List[str] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def __A ( self , a__ , a__ ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
_lowerCAmelCase : Optional[Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(a__ ) ):
_lowerCAmelCase : Any = minor[i][:y] + minor[i][y + 1 :]
return Matrix(a__ , self.__width - 1 , self.__height - 1 ).determinant()
def __A ( self , a__ , a__ ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(a__ , a__ )
else:
raise Exception("""Indices out of bounds""" )
def __A ( self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_lowerCAmelCase : Dict = [
self.__matrix[0][y] * self.cofactor(0 , a__ ) for y in range(self.__width )
]
return sum(a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Matrix:
_lowerCAmelCase : list[list[float]] = [[0] * n for _ in range(_lowerCamelCase )]
return Matrix(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Matrix:
random.seed(_lowerCamelCase )
_lowerCAmelCase : list[list[float]] = [
[random.randint(_lowerCamelCase ,_lowerCamelCase ) for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )
]
return Matrix(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
| 44
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44
| 1
|
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[str] ,_lowerCamelCase : Any ) -> Union[str, Any]:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray ,_lowerCamelCase : Optional[str] ,_lowerCamelCase : Optional[str] = None ) -> Optional[Any]:
_lowerCAmelCase : Dict = tesseract_config if tesseract_config is not None else """"""
# apply OCR
_lowerCAmelCase : Optional[Any] = to_pil_image(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : str = pil_image.size
_lowerCAmelCase : Dict = pytesseract.image_to_data(_lowerCamelCase ,lang=_lowerCamelCase ,output_type="""dict""" ,config=_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
_lowerCAmelCase : List[str] = [idx for idx, word in enumerate(_lowerCamelCase ) if not word.strip()]
_lowerCAmelCase : List[Any] = [word for idx, word in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
_lowerCAmelCase : Tuple = [coord for idx, coord in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
_lowerCAmelCase : Dict = [coord for idx, coord in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
_lowerCAmelCase : Dict = [coord for idx, coord in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
_lowerCAmelCase : Optional[Any] = [coord for idx, coord in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_lowerCAmelCase : List[str] = []
for x, y, w, h in zip(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(_lowerCamelCase )
# finally, normalize the bounding boxes
_lowerCAmelCase : int = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) )
assert len(_lowerCamelCase ) == len(_lowerCamelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["pixel_values"]
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BILINEAR , a__ = True , a__ = None , a__ = "" , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : Tuple = size if size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : Dict = get_size_dict(a__ )
_lowerCAmelCase : int = do_resize
_lowerCAmelCase : Optional[int] = size
_lowerCAmelCase : Optional[Any] = resample
_lowerCAmelCase : List[Any] = apply_ocr
_lowerCAmelCase : Dict = ocr_lang
_lowerCAmelCase : Tuple = tesseract_config
def __A ( self , a__ , a__ , a__ = PILImageResampling.BILINEAR , a__ = None , **a__ , ):
_lowerCAmelCase : Dict = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
_lowerCAmelCase : Optional[int] = (size["""height"""], size["""width"""])
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : str = size if size is not None else self.size
_lowerCAmelCase : Optional[Any] = get_size_dict(a__ )
_lowerCAmelCase : List[Any] = resample if resample is not None else self.resample
_lowerCAmelCase : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowerCAmelCase : Dict = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowerCAmelCase : int = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowerCAmelCase : str = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : Optional[int] = [to_numpy_array(a__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Optional[Any] = []
for image in images:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = apply_tesseract(a__ , a__ , a__ )
words_batch.append(a__ )
boxes_batch.append(a__ )
if do_resize:
_lowerCAmelCase : List[Any] = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowerCAmelCase : Union[str, Any] = [flip_channel_order(a__ ) for image in images]
_lowerCAmelCase : Tuple = [to_channel_dimension_format(a__ , a__ ) for image in images]
_lowerCAmelCase : Any = BatchFeature(data={"""pixel_values""": images} , tensor_type=a__ )
if apply_ocr:
_lowerCAmelCase : Optional[Any] = words_batch
_lowerCAmelCase : Optional[int] = boxes_batch
return data
| 44
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self , a__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = DebertaVaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_lowerCAmelCase : Dict = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 44
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : Optional[Any] = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = "vit_msn"
def __init__( self , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=1e-06 , a__=224 , a__=16 , a__=3 , a__=True , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : Dict = patch_size
_lowerCAmelCase : Optional[int] = num_channels
_lowerCAmelCase : Union[str, Any] = qkv_bias
| 44
|
"""simple docstring"""
import numpy as np
import qiskit
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 8 ,_lowerCamelCase : int | None = None ) -> str:
_lowerCAmelCase : int = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_lowerCAmelCase : Tuple = 6 * key_len
# Measurement basis for Alice's qubits.
_lowerCAmelCase : Dict = rng.integers(2 ,size=_lowerCamelCase )
# The set of states Alice will prepare.
_lowerCAmelCase : Tuple = rng.integers(2 ,size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
_lowerCAmelCase : Union[str, Any] = rng.integers(2 ,size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
_lowerCAmelCase : Dict = qiskit.QuantumCircuit(_lowerCamelCase ,name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_lowerCAmelCase : int = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_lowerCAmelCase : List[str] = qiskit.execute(_lowerCamelCase ,_lowerCamelCase ,shots=1 ,seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
_lowerCAmelCase : List[Any] = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_lowerCAmelCase : str = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_lowerCAmelCase : List[Any] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase ,"""0""" )
return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 44
| 1
|
"""simple docstring"""
from __future__ import annotations
class __A :
def __init__( self , a__ ):
_lowerCAmelCase : Any = data
_lowerCAmelCase : Node | None = None
_lowerCAmelCase : Node | None = None
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> None: # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> int:
return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node ) -> bool:
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
_lowerCAmelCase : List[Any] = Node(1 )
_lowerCAmelCase : Optional[int] = Node(2 )
_lowerCAmelCase : str = Node(3 )
_lowerCAmelCase : Tuple = Node(4 )
_lowerCAmelCase : Dict = Node(5 )
_lowerCAmelCase : Optional[Any] = Node(6 )
_lowerCAmelCase : List[Any] = Node(7 )
_lowerCAmelCase : List[Any] = Node(8 )
_lowerCAmelCase : int = Node(9 )
print(is_full_binary_tree(_lowerCamelCase ) )
print(depth_of_tree(_lowerCamelCase ) )
print("""Tree is: """ )
display(_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_a : Union[str, Any] = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_a : List[str] = 10
_a : List[Any] = 256
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[MinHash]:
if len(_lowerCamelCase ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase : Optional[Any] = MinHash(num_perm=_lowerCamelCase )
for token in set(_lowerCamelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(_lowerCamelCase ) if len(t.strip() ) > 0}
class __A :
def __init__( self , *,
a__ = 0.8_5 , ):
_lowerCAmelCase : List[Any] = duplication_jaccard_threshold
_lowerCAmelCase : Union[str, Any] = NUM_PERM
_lowerCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase : Optional[int] = defaultdict(a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self._index.query(a__ )
if code_key in self._index.keys:
print(F"Duplicate key {code_key}" )
return
self._index.insert(a__ , a__ )
if len(a__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(a__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(a__ )
def __A ( self ):
_lowerCAmelCase : int = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase : List[str] = [base] + list(a__ )
# reformat the cluster to be a list of dict
_lowerCAmelCase : List[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(a__ )
return duplicate_clusters
def __A ( self , a__ ):
_lowerCAmelCase : Dict = self.get_duplicate_clusters()
with open(a__ , """w""" ) as f:
json.dump(a__ , a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ) -> Optional[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash ,ThreadedIterator(_lowerCamelCase ,max_queue_size=10000 ) ,chunksize=100 ,):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float ) -> List[str]:
_lowerCAmelCase : Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=_lowerCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowerCamelCase ) ) ,max_queue_size=100 ) ):
di.add(_lowerCamelCase ,_lowerCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
_lowerCAmelCase : Any = get_tokens(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_tokens(_lowerCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_a : str = None
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : int = []
for elementa in cluster:
_lowerCAmelCase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
_lowerCAmelCase : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_lowerCamelCase ,_lowerCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase : Any = 1
extremes.append(_lowerCamelCase )
return extremes
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> str:
global _shared_dataset
_lowerCAmelCase : Tuple = dataset
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = partial(_find_cluster_extremes_shared ,jaccard_threshold=_lowerCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowerCamelCase ,_lowerCamelCase ,) ,total=len(_lowerCamelCase ) ,):
extremes_list.append(_lowerCamelCase )
return extremes_list
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase : Tuple = make_duplicate_clusters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Tuple = find_extremes(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : List[Any] = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase : List[Any] = dataset.filter(lambda _lowerCamelCase ,_lowerCamelCase : idx not in remove_indices ,with_indices=_lowerCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase : Tuple = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase : Dict = extreme_dict[element["""base_index"""]]["""copies"""]
print(f"Original dataset size: {len(_lowerCamelCase )}" )
print(f"Number of duplicate clusters: {len(_lowerCamelCase )}" )
print(f"Files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Unique files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Filtered dataset size: {len(_lowerCamelCase )}" )
return ds_filter, duplicate_clusters
| 44
| 1
|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_a : Optional[int] = logging.get_logger(__name__)
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = UNetaDModel
_UpperCamelCase : int = "sample"
@property
def __A ( self ):
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : Optional[int] = 3
_lowerCAmelCase : List[str] = (32, 32)
_lowerCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : Dict = torch.tensor([10] ).to(a__ )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ):
return (3, 32, 32)
@property
def __A ( self ):
return (3, 32, 32)
def __A ( self ):
_lowerCAmelCase : str = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
_lowerCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = UNetaDModel
_UpperCamelCase : Dict = "sample"
@property
def __A ( self ):
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : int = 4
_lowerCAmelCase : Optional[int] = (32, 32)
_lowerCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : int = torch.tensor([10] ).to(a__ )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ):
return (4, 32, 32)
@property
def __A ( self ):
return (4, 32, 32)
def __A ( self ):
_lowerCAmelCase : Dict = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_lowerCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(a__ )
_lowerCAmelCase : Any = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ )
model.to(a__ )
_lowerCAmelCase : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def __A ( self ):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
_lowerCAmelCase , _lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ )
model_accelerate.to(a__ )
model_accelerate.eval()
_lowerCAmelCase : int = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase : Optional[int] = noise.to(a__ )
_lowerCAmelCase : Dict = torch.tensor([10] * noise.shape[0] ).to(a__ )
_lowerCAmelCase : Dict = model_accelerate(a__ , a__ )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ , low_cpu_mem_usage=a__ )
model_normal_load.to(a__ )
model_normal_load.eval()
_lowerCAmelCase : Dict = model_normal_load(a__ , a__ )["""sample"""]
assert torch_all_close(a__ , a__ , rtol=1e-3 )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(a__ )
_lowerCAmelCase : List[Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase : List[str] = noise.to(a__ )
_lowerCAmelCase : Any = torch.tensor([10] * noise.shape[0] ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(a__ , a__ ).sample
_lowerCAmelCase : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_lowerCAmelCase : Optional[int] = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-3 ) )
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = UNetaDModel
_UpperCamelCase : Any = "sample"
@property
def __A ( self , a__=(32, 32) ):
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : Dict = 3
_lowerCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=a__ )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ):
return (3, 32, 32)
@property
def __A ( self ):
return (3, 32, 32)
def __A ( self ):
_lowerCAmelCase : List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_lowerCAmelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
@slow
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(a__ )
_lowerCAmelCase : str = self.dummy_input
_lowerCAmelCase : Optional[Any] = floats_tensor((4, 3) + (256, 256) ).to(a__ )
_lowerCAmelCase : Any = noise
_lowerCAmelCase : str = model(**a__ )
assert image is not None, "Make sure output is not None"
@slow
def __A ( self ):
_lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(a__ )
_lowerCAmelCase : str = 4
_lowerCAmelCase : List[str] = 3
_lowerCAmelCase : Any = (256, 256)
_lowerCAmelCase : List[Any] = torch.ones((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : str = torch.tensor(batch_size * [1e-4] ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , a__ ).sample
_lowerCAmelCase : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowerCAmelCase : int = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-2 ) )
def __A ( self ):
_lowerCAmelCase : int = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(a__ )
_lowerCAmelCase : Union[str, Any] = 4
_lowerCAmelCase : Tuple = 3
_lowerCAmelCase : int = (32, 32)
_lowerCAmelCase : List[str] = torch.ones((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : List[Any] = torch.tensor(batch_size * [1e-4] ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : int = model(a__ , a__ ).sample
_lowerCAmelCase : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowerCAmelCase : Optional[Any] = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-2 ) )
def __A ( self ):
# not required for this model
pass
| 44
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = "swinv2"
_UpperCamelCase : List[str] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a__=224 , a__=4 , a__=3 , a__=96 , a__=[2, 2, 6, 2] , a__=[3, 6, 12, 24] , a__=7 , a__=4.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=0.0_2 , a__=1e-5 , a__=32 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[Any] = embed_dim
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : List[Any] = len(a__ )
_lowerCAmelCase : Any = num_heads
_lowerCAmelCase : Tuple = window_size
_lowerCAmelCase : Tuple = mlp_ratio
_lowerCAmelCase : Any = qkv_bias
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : str = drop_path_rate
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[str] = use_absolute_embeddings
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Any = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(a__ ) - 1) )
_lowerCAmelCase : Tuple = (0, 0, 0, 0)
| 44
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __A ( unittest.TestCase ):
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def __A ( self ):
_lowerCAmelCase : Dict = self.dummy_uncond_unet
_lowerCAmelCase : List[str] = KarrasVeScheduler()
_lowerCAmelCase : List[Any] = KarrasVePipeline(unet=a__ , scheduler=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : Tuple = pipe(num_inference_steps=2 , generator=a__ , output_type="""numpy""" ).images
_lowerCAmelCase : Tuple = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=a__ , output_type="""numpy""" , return_dict=a__ )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : int = """google/ncsnpp-celebahq-256"""
_lowerCAmelCase : str = UNetaDModel.from_pretrained(a__ )
_lowerCAmelCase : Optional[Any] = KarrasVeScheduler()
_lowerCAmelCase : Dict = KarrasVePipeline(unet=a__ , scheduler=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = torch.manual_seed(0 )
_lowerCAmelCase : Tuple = pipe(num_inference_steps=20 , generator=a__ , output_type="""numpy""" ).images
_lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : List[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 44
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = """en_speaker_1"""
_lowerCAmelCase : List[Any] = """This is a test string"""
_lowerCAmelCase : Any = """speaker_embeddings_path.json"""
_lowerCAmelCase : List[Any] = """speaker_embeddings"""
def __A ( self , **a__ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
_lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : Dict = {
"""semantic_prompt""": np.ones(a__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(a__ , **a__ )
_lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
_lowerCAmelCase : Dict = processor(text=self.input_string )
_lowerCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[Any] = torch.exp(_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.sum(_lowerCamelCase ,dim=1 ) # sum of exp(x_i)
_lowerCAmelCase : Dict = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44
| 1
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> Union[str, Any]:
_lowerCAmelCase : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> int:
_lowerCAmelCase , _lowerCAmelCase : str = emb.weight.shape
_lowerCAmelCase : List[str] = nn.Linear(_lowerCamelCase ,_lowerCamelCase ,bias=_lowerCamelCase )
_lowerCAmelCase : Tuple = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Union[str, Any]=None ) -> List[Any]:
_lowerCAmelCase : Any = {}
for old_key in state_dict.keys():
_lowerCAmelCase : Optional[int] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_lowerCAmelCase : Optional[int] = key.replace("""moe_layer.experts.0""" ,f"ffn.experts.expert_{expert_idx}" )
else:
_lowerCAmelCase : Any = key.replace("""moe_layer.experts.""" ,"""ffn.experts.expert_""" )
if "gate" in key:
_lowerCAmelCase : int = key.replace(""".moe_layer.gate.wg""" ,""".ffn.router.classifier""" )
if "fc2" and "experts" not in key:
_lowerCAmelCase : List[Any] = key.replace(""".fc2.""" ,""".ffn.fc2.""" )
if "fc1" and "experts" not in key:
_lowerCAmelCase : Optional[int] = key.replace(""".fc1.""" ,""".ffn.fc1.""" )
if ".encoder_attn." in key:
_lowerCAmelCase : List[str] = key.replace(""".encoder_attn.""" ,""".cross_attention.""" )
if "encoder_attn_layer_norm" in key:
_lowerCAmelCase : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" ,"""cross_attention_layer_norm""" )
if "final_layer_norm" in key:
_lowerCAmelCase : Optional[int] = key.replace("""final_layer_norm""" ,"""ff_layer_norm""" )
_lowerCAmelCase : Union[str, Any] = state_dict[old_key]
return new_dict
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : List[str] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : str = WEIGHTS_NAME ) -> int:
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = 0
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = switch_checkpoint_path + f"-rank-{expert}.pt"
if os.path.isfile(_lowerCamelCase ):
_lowerCAmelCase : List[str] = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
_lowerCAmelCase : List[str] = rename_fairseq_keys(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Tuple = os.path.join(
_lowerCamelCase ,weights_name.replace(""".bin""" ,f"-{len(_lowerCamelCase )+1:05d}-of-???.bin" ) )
torch.save(_lowerCamelCase ,_lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase ,weights_name.replace(""".bin""" ,f"-{len(_lowerCamelCase )+1:05d}-of-???.bin" ) )
_lowerCAmelCase : Tuple = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = rename_fairseq_keys(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase ,_lowerCamelCase )
torch.save(_lowerCamelCase ,_lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase ,_lowerCamelCase )
# Otherwise, let's build the index
_lowerCAmelCase : int = {}
for idx, shard in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = weights_name.replace(""".bin""" ,f"-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin" )
_lowerCAmelCase : int = os.path.join(_lowerCamelCase ,weights_name.replace(""".bin""" ,f"-{idx+1:05d}-of-???.bin" ) )
os.rename(_lowerCamelCase ,os.path.join(_lowerCamelCase ,_lowerCamelCase ) )
for key in shard:
_lowerCAmelCase : Tuple = shard_file
# Add the metadata
_lowerCAmelCase : Any = {"""total_size""": total_size}
_lowerCAmelCase : List[str] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase ,_lowerCamelCase ) ,"""w""" ,encoding="""utf-8""" ) as f:
_lowerCAmelCase : List[str] = json.dumps(_lowerCamelCase ,indent=2 ,sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
_a : List[Any] = parser.parse_args()
_a , _a : Tuple = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_a : Any = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_a : List[str] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 44
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : str = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , a__ = None , a__ = None , **a__ , ):
super().__init__(self , **a__ )
_lowerCAmelCase : Any = repo_info
_lowerCAmelCase : Optional[Any] = token
_lowerCAmelCase : Optional[int] = None
def __A ( self ):
if self.dir_cache is None:
_lowerCAmelCase : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCAmelCase : Any = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(a__ ): {"""name""": str(a__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self , a__ , a__ = "rb" , **a__ , ):
if not isinstance(self.repo_info , a__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_lowerCAmelCase : Tuple = hf_hub_url(self.repo_info.id , a__ , revision=self.repo_info.sha )
return fsspec.open(
a__ , mode=a__ , headers=get_authentication_headers_for_url(a__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __A ( self , a__ , **a__ ):
self._get_dirs()
_lowerCAmelCase : Union[str, Any] = self._strip_protocol(a__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(a__ )
def __A ( self , a__ , a__=False , **a__ ):
self._get_dirs()
_lowerCAmelCase : Any = PurePosixPath(path.strip("""/""" ) )
_lowerCAmelCase : List[str] = {}
for p, f in self.dir_cache.items():
_lowerCAmelCase : Any = PurePosixPath(p.strip("""/""" ) )
_lowerCAmelCase : Optional[int] = p.parent
if root == path:
_lowerCAmelCase : Dict = f
_lowerCAmelCase : Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 44
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
_UpperCamelCase : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_UpperCamelCase : Optional[int] = "document_qa"
_UpperCamelCase : Any = AutoProcessor
_UpperCamelCase : Union[str, Any] = VisionEncoderDecoderModel
_UpperCamelCase : Union[str, Any] = ["image", "text"]
_UpperCamelCase : List[str] = ["text"]
def __init__( self , *a__ , **a__ ):
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*a__ , **a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[int] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_lowerCAmelCase : Dict = task_prompt.replace("""{user_input}""" , a__ )
_lowerCAmelCase : str = self.pre_processor.tokenizer(
a__ , add_special_tokens=a__ , return_tensors="""pt""" ).input_ids
_lowerCAmelCase : Dict = self.pre_processor(a__ , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __A ( self , a__ ):
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a__ , ).sequences
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = self.pre_processor.batch_decode(a__ )[0]
_lowerCAmelCase : int = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
_lowerCAmelCase : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
_lowerCAmelCase : List[str] = re.sub(r"""<.*?>""" , """""" , a__ , count=1 ).strip() # remove first task start token
_lowerCAmelCase : List[str] = self.pre_processor.tokenajson(a__ )
return sequence["answer"]
| 44
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
_UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCAmelCase : int = MultilingualCLIP(a__ )
_lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
| 44
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_a : int = 16
_a : str = 32
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Accelerator ,_lowerCamelCase : int = 16 ) -> List[str]:
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_lowerCAmelCase : Any = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(_lowerCamelCase : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : Optional[int] = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_lowerCamelCase ,max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCAmelCase : Tuple = datasets.map(
_lowerCamelCase ,batched=_lowerCamelCase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : Tuple = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(_lowerCamelCase : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase : Any = 16
elif accelerator.mixed_precision != "no":
_lowerCAmelCase : List[Any] = 8
else:
_lowerCAmelCase : Dict = None
return tokenizer.pad(
_lowerCamelCase ,padding="""longest""" ,max_length=_lowerCamelCase ,pad_to_multiple_of=_lowerCamelCase ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_lowerCAmelCase : List[str] = DataLoader(
tokenized_datasets["""train"""] ,shuffle=_lowerCamelCase ,collate_fn=_lowerCamelCase ,batch_size=_lowerCamelCase )
_lowerCAmelCase : List[Any] = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=_lowerCamelCase ,collate_fn=_lowerCamelCase ,batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_a : Any = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ) -> List[str]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,_lowerCamelCase ) == "1":
_lowerCAmelCase : str = 2
# Initialize accelerator
_lowerCAmelCase : List[str] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : List[str] = config["""lr"""]
_lowerCAmelCase : Tuple = int(config["""num_epochs"""] )
_lowerCAmelCase : List[str] = int(config["""seed"""] )
_lowerCAmelCase : Union[str, Any] = int(config["""batch_size"""] )
_lowerCAmelCase : List[Any] = evaluate.load("""glue""" ,"""mrpc""" )
# If the batch size is too big we use gradient accumulation
_lowerCAmelCase : Tuple = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_lowerCAmelCase : Dict = batch_size // MAX_GPU_BATCH_SIZE
_lowerCAmelCase : Optional[int] = MAX_GPU_BATCH_SIZE
set_seed(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : int = get_dataloaders(_lowerCamelCase ,_lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : str = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=_lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase : Dict = model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase : List[str] = AdamW(params=model.parameters() ,lr=_lowerCamelCase )
# Instantiate scheduler
_lowerCAmelCase : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase ,num_warmup_steps=100 ,num_training_steps=(len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = accelerator.prepare(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Now we train the model
for epoch in range(_lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCAmelCase : Optional[int] = model(**_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = outputs.loss
_lowerCAmelCase : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_lowerCAmelCase : List[str] = 0
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase : str = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(_lowerCamelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_lowerCAmelCase : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCAmelCase : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=_lowerCamelCase ,references=_lowerCamelCase ,)
_lowerCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
_lowerCAmelCase : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
|
"""simple docstring"""
from math import ceil
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ) -> int:
_lowerCAmelCase : Dict = list(range(0 ,_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_lowerCAmelCase : Union[str, Any] = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCamelCase )
# Missing blocks
_lowerCAmelCase : int = [i for i in blocks if i not in device_map_blocks]
_lowerCAmelCase : List[Any] = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> str:
_lowerCAmelCase : Optional[Any] = list(range(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = int(ceil(n_layers / len(_lowerCamelCase ) ) )
_lowerCAmelCase : Optional[int] = [layers[i : i + n_blocks] for i in range(0 ,_lowerCamelCase ,_lowerCamelCase )]
return dict(zip(_lowerCamelCase ,_lowerCamelCase ) )
| 44
| 1
|
"""simple docstring"""
from __future__ import annotations
_a : List[str] = 10
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = max(_lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase : list[list] = [[] for _ in range(_lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(_lowerCamelCase )
# put each buckets' contents into list_of_ints
_lowerCAmelCase : List[str] = 0
for b in range(_lowerCamelCase ):
for i in buckets[b]:
_lowerCAmelCase : Any = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
"""simple docstring"""
_a : List[str] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 44
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Tuple ,_lowerCamelCase : int ,_lowerCamelCase : Optional[int] ) -> str:
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
_lowerCAmelCase : Optional[int] = mf_knapsack(i - 1 ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
else:
_lowerCAmelCase : List[str] = max(
mf_knapsack(i - 1 ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) ,mf_knapsack(i - 1 ,_lowerCamelCase ,_lowerCamelCase ,j - wt[i - 1] ) + val[i - 1] ,)
_lowerCAmelCase : Optional[Any] = val
return f[i][j]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : int ,_lowerCamelCase : Tuple ,_lowerCamelCase : Any ) -> Any:
_lowerCAmelCase : List[str] = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 ,n + 1 ):
for w_ in range(1 ,w + 1 ):
if wt[i - 1] <= w_:
_lowerCAmelCase : List[Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] ,dp[i - 1][w_] )
else:
_lowerCAmelCase : Dict = dp[i - 1][w_]
return dp[n][w_], dp
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : list ,_lowerCamelCase : list ) -> Optional[int]:
if not (isinstance(_lowerCamelCase ,(list, tuple) ) and isinstance(_lowerCamelCase ,(list, tuple) )):
raise ValueError(
"""Both the weights and values vectors must be either lists or tuples""" )
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
if num_items != len(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = (
"""The number of weights must be the same as the number of values.\n"""
f"But got {num_items} weights and {len(_lowerCamelCase )} values"
)
raise ValueError(_lowerCamelCase )
for i in range(_lowerCamelCase ):
if not isinstance(wt[i] ,_lowerCamelCase ):
_lowerCAmelCase : str = (
"""All weights must be integers but got weight of """
f"type {type(wt[i] )} at index {i}"
)
raise TypeError(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Tuple = knapsack(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : set = set()
_construct_solution(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
return optimal_val, example_optional_set
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ,_lowerCamelCase : list ,_lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : set ) -> Optional[int]:
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(_lowerCamelCase ,_lowerCamelCase ,i - 1 ,_lowerCamelCase ,_lowerCamelCase )
else:
optimal_set.add(_lowerCamelCase )
_construct_solution(_lowerCamelCase ,_lowerCamelCase ,i - 1 ,j - wt[i - 1] ,_lowerCamelCase )
if __name__ == "__main__":
_a : str = [3, 2, 4, 4]
_a : List[Any] = [4, 3, 2, 3]
_a : List[Any] = 4
_a : Union[str, Any] = 6
_a : Union[str, Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
_a , _a : Optional[Any] = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
_a , _a : List[str] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 44
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
| 44
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : Union[str, Any] = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
| 44
| 1
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_a : int = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __A :
_UpperCamelCase : Dict = PegasusConfig
_UpperCamelCase : List[Any] = {}
_UpperCamelCase : Any = "gelu"
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=False , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__=0.1 , a__=0.1 , a__=20 , a__=2 , a__=1 , a__=0 , ):
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = max_position_embeddings
_lowerCAmelCase : List[str] = eos_token_id
_lowerCAmelCase : List[str] = pad_token_id
_lowerCAmelCase : Tuple = bos_token_id
def __A ( self ):
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_lowerCAmelCase : List[Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCAmelCase : Dict = np.concatenate([input_ids, eos_tensor] , axis=1 )
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowerCAmelCase : List[Any] = prepare_pegasus_inputs_dict(a__ , a__ , a__ )
return config, inputs_dict
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : str = 20
_lowerCAmelCase : Tuple = model_class_name(a__ )
_lowerCAmelCase : Optional[Any] = model.encode(inputs_dict["""input_ids"""] )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_lowerCAmelCase : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase : Tuple = model.decode(
decoder_input_ids[:, :-1] , a__ , decoder_attention_mask=a__ , past_key_values=a__ , decoder_position_ids=a__ , )
_lowerCAmelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowerCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:] , a__ , decoder_attention_mask=a__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=a__ , )
_lowerCAmelCase : Optional[int] = model.decode(a__ , a__ )
_lowerCAmelCase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = 20
_lowerCAmelCase : Optional[Any] = model_class_name(a__ )
_lowerCAmelCase : Tuple = model.encode(inputs_dict["""input_ids"""] )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCAmelCase : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowerCAmelCase : List[Any] = model.init_cache(decoder_input_ids.shape[0] , a__ , a__ )
_lowerCAmelCase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase : int = model.decode(
decoder_input_ids[:, :-1] , a__ , decoder_attention_mask=a__ , past_key_values=a__ , decoder_position_ids=a__ , )
_lowerCAmelCase : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowerCAmelCase : int = model.decode(
decoder_input_ids[:, -1:] , a__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=a__ , decoder_position_ids=a__ , )
_lowerCAmelCase : Any = model.decode(a__ , a__ , decoder_attention_mask=a__ )
_lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : List[str] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any]=None ,_lowerCamelCase : Dict=None ,) -> Union[str, Any]:
if attention_mask is None:
_lowerCAmelCase : int = np.not_equal(_lowerCamelCase ,config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_lowerCAmelCase : Union[str, Any] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape ,dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.inta ),
] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : int = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Tuple = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_UpperCamelCase : str = True
_UpperCamelCase : str = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : str = False
def __A ( self ):
_lowerCAmelCase : Dict = FlaxPegasusModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self , config_class=a__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : List[str] = self._prepare_for_class(a__ , a__ )
_lowerCAmelCase : List[Any] = model_class(a__ )
@jax.jit
def encode_jitted(a__ , a__=None , **a__ ):
return model.encode(input_ids=a__ , attention_mask=a__ )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase : List[str] = encode_jitted(**a__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase : Optional[Any] = encode_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : str = model_class(a__ )
_lowerCAmelCase : Optional[int] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_lowerCAmelCase : Optional[Any] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(a__ , a__ , a__ ):
return model.decode(
decoder_input_ids=a__ , decoder_attention_mask=a__ , encoder_outputs=a__ , )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase : Optional[int] = decode_jitted(**a__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase : Tuple = decode_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __A ( self ):
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Any = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=a__ )
_lowerCAmelCase : List[str] = np.ones((1, 1) )
_lowerCAmelCase : Optional[Any] = model(a__ )
self.assertIsNotNone(a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_lowerCAmelCase : int = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_lowerCAmelCase : List[str] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_lowerCAmelCase : List[Any] = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_lowerCAmelCase : Optional[int] = tokenizer(a__ , return_tensors="""np""" , truncation=a__ , max_length=512 , padding=a__ )
_lowerCAmelCase : Optional[Any] = model.generate(**a__ , num_beams=2 ).sequences
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(a__ , skip_special_tokens=a__ )
assert tgt_text == decoded
| 44
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" ,type=_lowerCamelCase ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
parser.add_argument(
"""--num_beams""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) ,)
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--config_name""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Pretrained config name or path if not the same as model_name""" ,)
parser.add_argument(
"""--device""" ,type=_lowerCamelCase ,default="""cpu""" ,help="""Device where the model will be run""" ,)
parser.add_argument("""--output_file_path""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Where to store the final ONNX file.""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
| 1
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_a : List[str] = logging.get_logger(__name__)
_a : Optional[int] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
_UpperCamelCase : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Model type selected in the list: " + ", ".join(SCREAMING_SNAKE_CASE_ )} )
_UpperCamelCase : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
_UpperCamelCase : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase : int = field(
default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
_UpperCamelCase : int = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
_UpperCamelCase : int = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
_UpperCamelCase : float = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
_UpperCamelCase : int = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
_UpperCamelCase : int = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
_UpperCamelCase : int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = "train"
_UpperCamelCase : List[str] = "dev"
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : SquadDataTrainingArguments
_UpperCamelCase : List[SquadFeatures]
_UpperCamelCase : Split
_UpperCamelCase : bool
def __init__( self , a__ , a__ , a__ = None , a__ = Split.train , a__ = False , a__ = None , a__ = "pt" , ):
_lowerCAmelCase : List[Any] = args
_lowerCAmelCase : str = is_language_sensitive
_lowerCAmelCase : Tuple = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(a__ , a__ ):
try:
_lowerCAmelCase : Any = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
_lowerCAmelCase : Union[str, Any] = mode
# Load data features from cache or dataset file
_lowerCAmelCase : Optional[int] = """v2""" if args.version_2_with_negative else """v1"""
_lowerCAmelCase : Dict = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase : int = cached_features_file + """.lock"""
with FileLock(a__ ):
if os.path.exists(a__ ) and not args.overwrite_cache:
_lowerCAmelCase : Dict = time.time()
_lowerCAmelCase : Optional[Any] = torch.load(a__ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCAmelCase : Optional[int] = self.old_features["""features"""]
_lowerCAmelCase : Optional[int] = self.old_features.get("""dataset""" , a__ )
_lowerCAmelCase : Optional[int] = self.old_features.get("""examples""" , a__ )
logger.info(
F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
""" future run""" )
else:
if mode == Split.dev:
_lowerCAmelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCAmelCase : int = self.processor.get_train_examples(args.data_dir )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = squad_convert_examples_to_features(
examples=self.examples , tokenizer=a__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=a__ , )
_lowerCAmelCase : Optional[Any] = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , a__ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self ):
return len(self.features )
def __getitem__( self , a__ ):
# Convert to Tensors and build dataset
_lowerCAmelCase : Union[str, Any] = self.features[i]
_lowerCAmelCase : Optional[Any] = torch.tensor(feature.input_ids , dtype=torch.long )
_lowerCAmelCase : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
_lowerCAmelCase : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long )
_lowerCAmelCase : Union[str, Any] = torch.tensor(feature.cls_index , dtype=torch.long )
_lowerCAmelCase : str = torch.tensor(feature.p_mask , dtype=torch.float )
_lowerCAmelCase : List[Any] = torch.tensor(feature.is_impossible , dtype=torch.float )
_lowerCAmelCase : Tuple = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCAmelCase : Any = torch.tensor(feature.start_position , dtype=torch.long )
_lowerCAmelCase : Any = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
| 44
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]: # noqa: E741
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : str = [False] * n
_lowerCAmelCase : str = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Union[str, Any] = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : int = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Tuple = True
else:
_lowerCAmelCase : Union[str, Any] = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : List[str] = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
# Adjacency list of graph
_a : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 44
| 1
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : Dict = {'vocab_file': 'vocab.txt'}
_a : str = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
_a : List[str] = {
'openbmb/cpm-ant-10b': 1_024,
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Tuple:
_lowerCAmelCase : List[str] = collections.OrderedDict()
with open(_lowerCamelCase ,"""r""" ,encoding="""utf-8""" ) as reader:
_lowerCAmelCase : Tuple = reader.readlines()
for index, token in enumerate(_lowerCamelCase ):
_lowerCAmelCase : str = token.rstrip("""\n""" )
_lowerCAmelCase : str = index
return vocab
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__="<unk>" , a__=200 ):
_lowerCAmelCase : Optional[Any] = vocab
_lowerCAmelCase : Any = unk_token
_lowerCAmelCase : int = max_input_chars_per_word
def __A ( self , a__ ):
_lowerCAmelCase : Any = list(a__ )
if len(a__ ) > self.max_input_chars_per_word:
return [self.unk_token]
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Dict = []
while start < len(a__ ):
_lowerCAmelCase : Tuple = len(a__ )
_lowerCAmelCase : int = None
while start < end:
_lowerCAmelCase : str = """""".join(chars[start:end] )
if substr in self.vocab:
_lowerCAmelCase : int = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(a__ )
_lowerCAmelCase : Union[str, Any] = end
return sub_tokens
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ["input_ids", "attention_mask"]
_UpperCamelCase : str = False
def __init__( self , a__ , a__="<d>" , a__="</d>" , a__="<s>" , a__="</s>" , a__="<pad>" , a__="<unk>" , a__="</n>" , a__="</_>" , a__="left" , **a__ , ):
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=a__ , eod_token=a__ , bos_token=a__ , eos_token=a__ , pad_token=a__ , unk_token=a__ , line_token=a__ , space_token=a__ , padding_side=a__ , **a__ , )
_lowerCAmelCase : Union[str, Any] = bod_token
_lowerCAmelCase : List[Any] = eod_token
_lowerCAmelCase : List[str] = load_vocab(a__ )
_lowerCAmelCase : Tuple = self.encoder[space_token]
_lowerCAmelCase : Optional[Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_lowerCAmelCase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a__ : x[1] ) )
_lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __A ( self ):
return self.encoder[self.bod_token]
@property
def __A ( self ):
return self.encoder[self.eod_token]
@property
def __A ( self ):
return self.encoder["\n"]
@property
def __A ( self ):
return len(self.encoder )
def __A ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , a__ ):
_lowerCAmelCase : Dict = []
for x in jieba.cut(a__ , cut_all=a__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(a__ ) )
return output_tokens
def __A ( self , a__ , **a__ ):
_lowerCAmelCase : Any = [i for i in token_ids if i >= 0]
_lowerCAmelCase : str = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(a__ , **a__ )
def __A ( self , a__ ):
return token in self.encoder
def __A ( self , a__ ):
return "".join(a__ )
def __A ( self , a__ ):
return self.encoder.get(a__ , self.encoder.get(self.unk_token ) )
def __A ( self , a__ ):
return self.decoder.get(a__ , self.unk_token )
def __A ( self , a__ , a__ = None ):
if os.path.isdir(a__ ):
_lowerCAmelCase : int = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
_lowerCAmelCase : List[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
_lowerCAmelCase : Any = 0
if " " in self.encoder:
_lowerCAmelCase : int = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
_lowerCAmelCase : int = self.encoder["""\n"""]
del self.encoder["\n"]
_lowerCAmelCase : List[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a__ : x[1] ) )
with open(a__ , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
""" Please check that the vocabulary is not corrupted!""" )
_lowerCAmelCase : List[str] = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is not None:
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ ))
return [1] + ([0] * len(a__ ))
| 44
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = XGLMTokenizer
_UpperCamelCase : List[Any] = XGLMTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
_lowerCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=a__ )
_lowerCAmelCase : List[str] = pickle.dumps(a__ )
pickle.loads(a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = """Hello World!"""
_lowerCAmelCase : Optional[int] = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/xglm-564M""" , padding=a__ , )
| 44
| 1
|
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = BertJapaneseTokenizer
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : List[Any] = True
def __A ( self ):
super().setUp()
_lowerCAmelCase : List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __A ( self , a__ ):
_lowerCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。"""
_lowerCAmelCase : Tuple = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def __A ( self , a__ ):
_lowerCAmelCase , _lowerCAmelCase : int = self.get_input_output_texts(a__ )
_lowerCAmelCase : List[Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
return text, ids
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
_lowerCAmelCase : int = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase : str = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(a__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __A ( self ):
_lowerCAmelCase : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(a__ )
_lowerCAmelCase : str = """こんにちは、世界。\nこんばんは、世界。"""
_lowerCAmelCase : int = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(a__ , """wb""" ) as handle:
pickle.dump(a__ , a__ )
with open(a__ , """rb""" ) as handle:
_lowerCAmelCase : List[Any] = pickle.load(a__ )
_lowerCAmelCase : List[Any] = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def __A ( self ):
try:
_lowerCAmelCase : List[Any] = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def __A ( self ):
try:
_lowerCAmelCase : str = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def __A ( self ):
_lowerCAmelCase : Optional[int] = MecabTokenizer(do_lower_case=a__ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def __A ( self ):
try:
_lowerCAmelCase : Any = MecabTokenizer(
do_lower_case=a__ , normalize_text=a__ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def __A ( self ):
_lowerCAmelCase : List[str] = MecabTokenizer(normalize_text=a__ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(a__ )
_lowerCAmelCase : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
_lowerCAmelCase : int = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(a__ , """wb""" ) as handle:
pickle.dump(a__ , a__ )
with open(a__ , """rb""" ) as handle:
_lowerCAmelCase : Union[str, Any] = pickle.load(a__ )
_lowerCAmelCase : Tuple = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ , a__ )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Tuple = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : str = SudachiTokenizer(do_lower_case=a__ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : Dict = SudachiTokenizer(normalize_text=a__ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def __A ( self ):
_lowerCAmelCase : int = SudachiTokenizer(trim_whitespace=a__ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(a__ )
_lowerCAmelCase : Any = """こんにちは、世界。\nこんばんは、世界。"""
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(a__ , """wb""" ) as handle:
pickle.dump(a__ , a__ )
with open(a__ , """rb""" ) as handle:
_lowerCAmelCase : List[Any] = pickle.load(a__ )
_lowerCAmelCase : str = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ , a__ )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : str = JumanppTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : str = JumanppTokenizer(normalize_text=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = JumanppTokenizer(trim_whitespace=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def __A ( self ):
_lowerCAmelCase : str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def __A ( self ):
_lowerCAmelCase : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
_lowerCAmelCase : Union[str, Any] = {}
for i, token in enumerate(a__ ):
_lowerCAmelCase : List[str] = i
_lowerCAmelCase : Optional[int] = WordpieceTokenizer(vocab=a__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def __A ( self ):
_lowerCAmelCase : Any = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
_lowerCAmelCase : Union[str, Any] = tokenizer.subword_tokenizer
_lowerCAmelCase : Any = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(a__ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
_lowerCAmelCase : Optional[int] = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(a__ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=a__ )
_lowerCAmelCase : Optional[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=a__ )
_lowerCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(a__ )
_lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = BertJapaneseTokenizer
_UpperCamelCase : Optional[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __A ( self , **a__ ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Dict = """こんにちは、世界。 \nこんばんは、世界。"""
_lowerCAmelCase : Optional[Any] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
pass # TODO add if relevant
def __A ( self ):
_lowerCAmelCase : int = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
a__ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_lowerCAmelCase : List[Any] = {}
for i, token in enumerate(a__ ):
_lowerCAmelCase : int = i
_lowerCAmelCase : str = CharacterTokenizer(vocab=a__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def __A ( self ):
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
_lowerCAmelCase : List[Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a__ )
_lowerCAmelCase : int = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = """cl-tohoku/bert-base-japanese"""
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(a__ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
_lowerCAmelCase : Dict = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(a__ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 44
|
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ) -> List[str]:
_lowerCAmelCase : Tuple = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[int] = height - k_size + 1
_lowerCAmelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(_lowerCamelCase ) ,range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : List[Any] = gen_gaussian_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = ravel(_lowerCamelCase )
# reshape and get the dst image
_lowerCAmelCase : int = dot(_lowerCamelCase ,_lowerCamelCase ).reshape(_lowerCamelCase ,_lowerCamelCase ).astype(_lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_a : Optional[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
_a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_a : Union[str, Any] = gaussian_filter(gray, 3, sigma=1)
_a : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
| 44
| 1
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __A :
def __init__( self , a__ , a__=sys.maxsize ):
_lowerCAmelCase : str = """bilinear"""
_lowerCAmelCase : List[str] = max_size
_lowerCAmelCase : Optional[Any] = short_edge_length
def __call__( self , a__ ):
_lowerCAmelCase : int = []
for img in imgs:
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
_lowerCAmelCase : Tuple = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_lowerCAmelCase : Union[str, Any] = size * 1.0 / min(a__ , a__ )
if h < w:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = size, scale * w
else:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = scale * h, size
if max(a__ , a__ ) > self.max_size:
_lowerCAmelCase : Optional[Any] = self.max_size * 1.0 / max(a__ , a__ )
_lowerCAmelCase : List[str] = newh * scale
_lowerCAmelCase : Union[str, Any] = neww * scale
_lowerCAmelCase : List[Any] = int(neww + 0.5 )
_lowerCAmelCase : Optional[int] = int(newh + 0.5 )
if img.dtype == np.uinta:
_lowerCAmelCase : Dict = Image.fromarray(a__ )
_lowerCAmelCase : Any = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_lowerCAmelCase : List[Any] = np.asarray(a__ )
else:
_lowerCAmelCase : Tuple = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_lowerCAmelCase : Tuple = nn.functional.interpolate(
a__ , (newh, neww) , mode=self.interp_method , align_corners=a__ ).squeeze(0 )
img_augs.append(a__ )
return img_augs
class __A :
def __init__( self , a__ ):
_lowerCAmelCase : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_lowerCAmelCase : List[str] = cfg.INPUT.FORMAT
_lowerCAmelCase : List[Any] = cfg.SIZE_DIVISIBILITY
_lowerCAmelCase : Any = cfg.PAD_VALUE
_lowerCAmelCase : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
_lowerCAmelCase : List[str] = cfg.MODEL.DEVICE
_lowerCAmelCase : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowerCAmelCase : List[str] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowerCAmelCase : Union[str, Any] = lambda a__ : (x - self.pixel_mean) / self.pixel_std
def __A ( self , a__ ):
_lowerCAmelCase : Optional[Any] = tuple(max(a__ ) for s in zip(*[img.shape for img in images] ) )
_lowerCAmelCase : Optional[Any] = [im.shape[-2:] for im in images]
_lowerCAmelCase : Optional[Any] = [
nn.functional.pad(
a__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(a__ , a__ )
]
return torch.stack(a__ ), torch.tensor(a__ )
def __call__( self , a__ , a__=False ):
with torch.no_grad():
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[Any] = [images]
if single_image:
assert len(a__ ) == 1
for i in range(len(a__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(a__ , images.pop(a__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
a__ , torch.as_tensor(img_tensorize(images.pop(a__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_lowerCAmelCase : Union[str, Any] = torch.tensor([im.shape[:2] for im in images] )
_lowerCAmelCase : Optional[int] = self.aug(a__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_lowerCAmelCase : List[Any] = [self.normalizer(a__ ) for x in images]
# now pad them to do the following operations
_lowerCAmelCase , _lowerCAmelCase : Any = self.pad(a__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_lowerCAmelCase : Optional[int] = torch.true_divide(a__ , a__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Any ) -> Dict:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple[int, int] ) -> Tuple:
assert torch.isfinite(_lowerCamelCase ).all(), "Box tensor contains infinite or NaN!"
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = box_size
tensor[:, 0].clamp_(min=0 ,max=_lowerCamelCase )
tensor[:, 1].clamp_(min=0 ,max=_lowerCamelCase )
tensor[:, 2].clamp_(min=0 ,max=_lowerCamelCase )
tensor[:, 3].clamp_(min=0 ,max=_lowerCamelCase )
| 44
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_a : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
_a : Optional[Any] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
_a : Any = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ElectraTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ):
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , )
_lowerCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , a__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , a__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , a__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(a__ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : int = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : Dict = tokenize_chinese_chars
_lowerCAmelCase : str = normalizer_class(**a__ )
_lowerCAmelCase : List[str] = do_lower_case
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
| 44
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_UpperCamelCase : str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
_UpperCamelCase : ClassVar[Features] = Features({"text": Value("string" )} )
_UpperCamelCase : ClassVar[Features] = Features({"summary": Value("string" )} )
_UpperCamelCase : str = "text"
_UpperCamelCase : str = "summary"
@property
def __A ( self ):
return {self.text_column: "text", self.summary_column: "summary"}
| 44
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_a : str = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_a : List[str] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_a : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def __A ( self , a__ , a__ , a__=False ):
if return_pvalue:
_lowerCAmelCase : List[Any] = pearsonr(a__ , a__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(a__ , a__ )[0] )}
| 44
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000000 ) -> int:
_lowerCAmelCase : str = 1
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 ,_lowerCamelCase ):
_lowerCAmelCase : str = 0
_lowerCAmelCase : int = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Optional[Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Union[str, Any] = counter
if counter > pre_counter:
_lowerCAmelCase : List[str] = inputa
_lowerCAmelCase : str = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 44
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 50 ) -> int:
_lowerCAmelCase : int = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44
| 1
|
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : str = '▁'
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = BertGenerationTokenizer
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
_lowerCAmelCase : List[str] = BertGenerationTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : Dict = """<s>"""
_lowerCAmelCase : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1002 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = BertGenerationTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [285, 46, 10, 170, 382] , )
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = """Hello World!"""
_lowerCAmelCase : Union[str, Any] = [18536, 2260, 101]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : List[str] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
_lowerCAmelCase : Dict = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@require_torch
@slow
def __A ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_lowerCAmelCase : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowerCAmelCase : List[str] = """ """.join(a__ )
_lowerCAmelCase : Union[str, Any] = self.big_tokenizer.encode_plus(a__ , return_tensors="""pt""" , return_token_type_ids=a__ )
_lowerCAmelCase : Union[str, Any] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=a__ )
_lowerCAmelCase : int = BertGenerationConfig()
_lowerCAmelCase : Optional[Any] = BertGenerationEncoder(a__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a__ )
model(**a__ )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : Dict = {"""input_ids""": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 44
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
_UpperCamelCase : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_UpperCamelCase : Optional[int] = "document_qa"
_UpperCamelCase : Any = AutoProcessor
_UpperCamelCase : Union[str, Any] = VisionEncoderDecoderModel
_UpperCamelCase : Union[str, Any] = ["image", "text"]
_UpperCamelCase : List[str] = ["text"]
def __init__( self , *a__ , **a__ ):
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*a__ , **a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[int] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_lowerCAmelCase : Dict = task_prompt.replace("""{user_input}""" , a__ )
_lowerCAmelCase : str = self.pre_processor.tokenizer(
a__ , add_special_tokens=a__ , return_tensors="""pt""" ).input_ids
_lowerCAmelCase : Dict = self.pre_processor(a__ , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __A ( self , a__ ):
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a__ , ).sequences
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = self.pre_processor.batch_decode(a__ )[0]
_lowerCAmelCase : int = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
_lowerCAmelCase : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
_lowerCAmelCase : List[str] = re.sub(r"""<.*?>""" , """""" , a__ , count=1 ).strip() # remove first task start token
_lowerCAmelCase : List[str] = self.pre_processor.tokenajson(a__ )
return sequence["answer"]
| 44
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : List[Any] = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = "ibert"
def __init__( self , a__=30522 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=False , a__="none" , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Optional[int] = position_embedding_type
_lowerCAmelCase : Any = quant_mode
_lowerCAmelCase : Union[str, Any] = force_dequant
class __A ( SCREAMING_SNAKE_CASE_ ):
@property
def __A ( self ):
if self.task == "multiple-choice":
_lowerCAmelCase : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 44
|
"""simple docstring"""
from __future__ import annotations
_a : List[str] = 10
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = max(_lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase : list[list] = [[] for _ in range(_lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(_lowerCamelCase )
# put each buckets' contents into list_of_ints
_lowerCAmelCase : List[str] = 0
for b in range(_lowerCamelCase ):
for i in buckets[b]:
_lowerCAmelCase : Any = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 1
|
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Tuple:
_lowerCAmelCase : Any = tf.convert_to_tensor(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) ,x.dtype ) ))
return x * cdf
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> str:
_lowerCAmelCase : str = tf.convert_to_tensor(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tf.cast(math.pi ,x.dtype )
_lowerCAmelCase : int = tf.cast(0.04_47_15 ,x.dtype )
_lowerCAmelCase : Tuple = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_lowerCamelCase ,3 )) ))
return x * cdf
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor(_lowerCamelCase )
return x * tf.tanh(tf.math.softplus(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase : Union[str, Any] = tf.convert_to_tensor(_lowerCamelCase )
_lowerCAmelCase : Tuple = tf.cast(0.04_47_15 ,x.dtype )
_lowerCAmelCase : Union[str, Any] = tf.cast(0.79_78_84_56_08 ,x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Optional[int]:
_lowerCAmelCase : Optional[Any] = tf.convert_to_tensor(_lowerCamelCase )
_lowerCAmelCase : Tuple = tf.cast(1.7_02 ,x.dtype )
return x * tf.math.sigmoid(coeff * x )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Dict:
return tf.clip_by_value(_gelu(_lowerCamelCase ) ,-10 ,10 )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Dict=-1 ) -> List[Any]:
_lowerCAmelCase , _lowerCAmelCase : Dict = tf.split(_lowerCamelCase ,2 ,axis=_lowerCamelCase )
return a * tf.math.sigmoid(_lowerCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> List[Any]:
return tf.keras.activations.gelu(_lowerCamelCase ,approximate=_lowerCamelCase )
_a : Optional[int] = tf.keras.activations.gelu
_a : Any = approximate_gelu_wrap
else:
_a : Dict = _gelu
_a : int = _gelu_new
_a : Dict = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> List[str]:
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 44
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __A :
_UpperCamelCase : torch.Tensor # [batch_size x 3]
_UpperCamelCase : torch.Tensor # [batch_size x 3]
_UpperCamelCase : torch.Tensor # [batch_size x 3]
_UpperCamelCase : torch.Tensor # [batch_size x 3]
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : float
_UpperCamelCase : float
_UpperCamelCase : Tuple[int]
def __A ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __A ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __A ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __A ( self ):
_lowerCAmelCase : Any = torch.arange(self.height * self.width )
_lowerCAmelCase : Dict = torch.stack(
[
pixel_indices % self.width,
torch.div(a__ , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def __A ( self ):
_lowerCAmelCase , *_lowerCAmelCase : Dict = self.shape
_lowerCAmelCase : int = int(np.prod(a__ ) )
_lowerCAmelCase : List[Any] = self.get_image_coords()
_lowerCAmelCase : str = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_lowerCAmelCase : Optional[Any] = self.get_camera_rays(a__ )
_lowerCAmelCase : str = rays.view(a__ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __A ( self , a__ ):
_lowerCAmelCase , *_lowerCAmelCase , _lowerCAmelCase : Dict = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_lowerCAmelCase : Optional[Any] = coords.view(a__ , -1 , 2 )
_lowerCAmelCase : Optional[Any] = self.resolution()
_lowerCAmelCase : Any = self.fov()
_lowerCAmelCase : List[Any] = (flat.float() / (res - 1)) * 2 - 1
_lowerCAmelCase : Optional[Any] = fracs * torch.tan(fov / 2 )
_lowerCAmelCase : Optional[Any] = fracs.view(a__ , -1 , 2 )
_lowerCAmelCase : Union[str, Any] = (
self.z.view(a__ , 1 , 3 )
+ self.x.view(a__ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(a__ , 1 , 3 ) * fracs[:, :, 1:]
)
_lowerCAmelCase : List[Any] = directions / directions.norm(dim=-1 , keepdim=a__ )
_lowerCAmelCase : List[str] = torch.stack(
[
torch.broadcast_to(self.origin.view(a__ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(a__ , *a__ , 2 , 3 )
def __A ( self , a__ , a__ ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=a__ , height=a__ , x_fov=self.x_fov , y_fov=self.y_fov , )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> DifferentiableProjectiveCamera:
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
_lowerCAmelCase : Union[str, Any] = np.array([np.sin(_lowerCamelCase ), np.cos(_lowerCamelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_lowerCAmelCase : Optional[int] = -z * 4
_lowerCAmelCase : Dict = np.array([np.cos(_lowerCamelCase ), -np.sin(_lowerCamelCase ), 0.0] )
_lowerCAmelCase : int = np.cross(_lowerCamelCase ,_lowerCamelCase )
origins.append(_lowerCamelCase )
xs.append(_lowerCamelCase )
ys.append(_lowerCamelCase )
zs.append(_lowerCamelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_lowerCamelCase ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(_lowerCamelCase ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(_lowerCamelCase ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(_lowerCamelCase ,axis=0 ) ).float() ,width=_lowerCamelCase ,height=_lowerCamelCase ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(_lowerCamelCase )) ,)
| 44
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self , a__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = DebertaVaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_lowerCAmelCase : Dict = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 44
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000 ) -> int:
_lowerCAmelCase , _lowerCAmelCase : Tuple = 1, 1
_lowerCAmelCase : Optional[Any] = 2
while True:
_lowerCAmelCase : Any = 0
_lowerCAmelCase : str = fa + fa
_lowerCAmelCase , _lowerCAmelCase : Any = fa, f
index += 1
for _ in str(_lowerCamelCase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 44
|
"""simple docstring"""
import numpy as np
import qiskit
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 8 ,_lowerCamelCase : int | None = None ) -> str:
_lowerCAmelCase : int = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_lowerCAmelCase : Tuple = 6 * key_len
# Measurement basis for Alice's qubits.
_lowerCAmelCase : Dict = rng.integers(2 ,size=_lowerCamelCase )
# The set of states Alice will prepare.
_lowerCAmelCase : Tuple = rng.integers(2 ,size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
_lowerCAmelCase : Union[str, Any] = rng.integers(2 ,size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
_lowerCAmelCase : Dict = qiskit.QuantumCircuit(_lowerCamelCase ,name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_lowerCAmelCase : int = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_lowerCAmelCase : List[str] = qiskit.execute(_lowerCamelCase ,_lowerCamelCase ,shots=1 ,seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
_lowerCAmelCase : List[Any] = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_lowerCAmelCase : str = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_lowerCAmelCase : List[Any] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase ,"""0""" )
return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 44
| 1
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __A ( nn.Module ):
_UpperCamelCase : int
_UpperCamelCase : jnp.dtype = jnp.floataa
def __A ( self ):
_lowerCAmelCase : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , a__ ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = hidden_states.shape
_lowerCAmelCase : List[Any] = jax.image.resize(
a__ , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
_lowerCAmelCase : str = self.conv(a__ )
return hidden_states
class __A ( nn.Module ):
_UpperCamelCase : int
_UpperCamelCase : jnp.dtype = jnp.floataa
def __A ( self ):
_lowerCAmelCase : List[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , a__ ):
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_lowerCAmelCase : Optional[int] = self.conv(a__ )
return hidden_states
class __A ( nn.Module ):
_UpperCamelCase : int
_UpperCamelCase : int = None
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = None
_UpperCamelCase : jnp.dtype = jnp.floataa
def __A ( self ):
_lowerCAmelCase : Tuple = self.in_channels if self.out_channels is None else self.out_channels
_lowerCAmelCase : List[str] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_lowerCAmelCase : Tuple = nn.Conv(
a__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_lowerCAmelCase : Optional[Any] = nn.Dense(a__ , dtype=self.dtype )
_lowerCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_lowerCAmelCase : Any = nn.Dropout(self.dropout_prob )
_lowerCAmelCase : Optional[Any] = nn.Conv(
a__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_lowerCAmelCase : Any = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_lowerCAmelCase : str = None
if use_nin_shortcut:
_lowerCAmelCase : Dict = nn.Conv(
a__ , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self , a__ , a__ , a__=True ):
_lowerCAmelCase : Union[str, Any] = hidden_states
_lowerCAmelCase : Union[str, Any] = self.norma(a__ )
_lowerCAmelCase : Union[str, Any] = nn.swish(a__ )
_lowerCAmelCase : Dict = self.conva(a__ )
_lowerCAmelCase : Any = self.time_emb_proj(nn.swish(a__ ) )
_lowerCAmelCase : int = jnp.expand_dims(jnp.expand_dims(a__ , 1 ) , 1 )
_lowerCAmelCase : Optional[Any] = hidden_states + temb
_lowerCAmelCase : Optional[Any] = self.norma(a__ )
_lowerCAmelCase : int = nn.swish(a__ )
_lowerCAmelCase : Optional[int] = self.dropout(a__ , a__ )
_lowerCAmelCase : List[Any] = self.conva(a__ )
if self.conv_shortcut is not None:
_lowerCAmelCase : Tuple = self.conv_shortcut(a__ )
return hidden_states + residual
| 44
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_a : Union[str, Any] = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_a : List[str] = 10
_a : List[Any] = 256
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[MinHash]:
if len(_lowerCamelCase ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase : Optional[Any] = MinHash(num_perm=_lowerCamelCase )
for token in set(_lowerCamelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(_lowerCamelCase ) if len(t.strip() ) > 0}
class __A :
def __init__( self , *,
a__ = 0.8_5 , ):
_lowerCAmelCase : List[Any] = duplication_jaccard_threshold
_lowerCAmelCase : Union[str, Any] = NUM_PERM
_lowerCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase : Optional[int] = defaultdict(a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self._index.query(a__ )
if code_key in self._index.keys:
print(F"Duplicate key {code_key}" )
return
self._index.insert(a__ , a__ )
if len(a__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(a__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(a__ )
def __A ( self ):
_lowerCAmelCase : int = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase : List[str] = [base] + list(a__ )
# reformat the cluster to be a list of dict
_lowerCAmelCase : List[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(a__ )
return duplicate_clusters
def __A ( self , a__ ):
_lowerCAmelCase : Dict = self.get_duplicate_clusters()
with open(a__ , """w""" ) as f:
json.dump(a__ , a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ) -> Optional[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash ,ThreadedIterator(_lowerCamelCase ,max_queue_size=10000 ) ,chunksize=100 ,):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float ) -> List[str]:
_lowerCAmelCase : Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=_lowerCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowerCamelCase ) ) ,max_queue_size=100 ) ):
di.add(_lowerCamelCase ,_lowerCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
_lowerCAmelCase : Any = get_tokens(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_tokens(_lowerCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_a : str = None
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : int = []
for elementa in cluster:
_lowerCAmelCase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
_lowerCAmelCase : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_lowerCamelCase ,_lowerCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase : Any = 1
extremes.append(_lowerCamelCase )
return extremes
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> str:
global _shared_dataset
_lowerCAmelCase : Tuple = dataset
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = partial(_find_cluster_extremes_shared ,jaccard_threshold=_lowerCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowerCamelCase ,_lowerCamelCase ,) ,total=len(_lowerCamelCase ) ,):
extremes_list.append(_lowerCamelCase )
return extremes_list
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase : Tuple = make_duplicate_clusters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Tuple = find_extremes(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : List[Any] = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase : List[Any] = dataset.filter(lambda _lowerCamelCase ,_lowerCamelCase : idx not in remove_indices ,with_indices=_lowerCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase : Tuple = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase : Dict = extreme_dict[element["""base_index"""]]["""copies"""]
print(f"Original dataset size: {len(_lowerCamelCase )}" )
print(f"Number of duplicate clusters: {len(_lowerCamelCase )}" )
print(f"Files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Unique files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Filtered dataset size: {len(_lowerCamelCase )}" )
return ds_filter, duplicate_clusters
| 44
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : Union[str, Any] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_a : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = "swinv2"
_UpperCamelCase : List[str] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a__=224 , a__=4 , a__=3 , a__=96 , a__=[2, 2, 6, 2] , a__=[3, 6, 12, 24] , a__=7 , a__=4.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=0.0_2 , a__=1e-5 , a__=32 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[Any] = embed_dim
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : List[Any] = len(a__ )
_lowerCAmelCase : Any = num_heads
_lowerCAmelCase : Tuple = window_size
_lowerCAmelCase : Tuple = mlp_ratio
_lowerCAmelCase : Any = qkv_bias
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : str = drop_path_rate
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[str] = use_absolute_embeddings
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Any = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(a__ ) - 1) )
_lowerCAmelCase : Tuple = (0, 0, 0, 0)
| 44
| 1
|
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
_a : Any = 6_37_81_37.0
_a : List[str] = 6_35_67_52.31_42_45
_a : Tuple = 6_378_137
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
_lowerCAmelCase : Any = (AXIS_A - AXIS_B) / AXIS_A
_lowerCAmelCase : List[str] = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
_lowerCAmelCase : Dict = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
_lowerCAmelCase : Optional[Any] = radians(_lowerCamelCase )
_lowerCAmelCase : int = radians(_lowerCamelCase )
# Equation
_lowerCAmelCase : Dict = sin((phi_a - phi_a) / 2 )
_lowerCAmelCase : Tuple = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_lowerCAmelCase : int = sqrt(sin_sq_phi + (cos(_lowerCamelCase ) * cos(_lowerCamelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = """en_speaker_1"""
_lowerCAmelCase : List[Any] = """This is a test string"""
_lowerCAmelCase : Any = """speaker_embeddings_path.json"""
_lowerCAmelCase : List[Any] = """speaker_embeddings"""
def __A ( self , **a__ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
_lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : Dict = {
"""semantic_prompt""": np.ones(a__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(a__ , **a__ )
_lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
_lowerCAmelCase : Dict = processor(text=self.input_string )
_lowerCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44
| 1
|
"""simple docstring"""
import enum
import shutil
import sys
_a , _a : int = shutil.get_terminal_size()
_a : Optional[Any] = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class __A ( enum.Enum ):
_UpperCamelCase : int = 0
_UpperCamelCase : List[str] = 1
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Tuple="" ) -> Optional[int]:
sys.stdout.write(str(_lowerCamelCase ) + end )
sys.stdout.flush()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : str ,_lowerCamelCase : Union[str, Any]="" ) -> Optional[int]:
forceWrite(f"\u001b[{color}m{content}\u001b[0m" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> int:
forceWrite("""\r""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : str ) -> Optional[Any]:
forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}" )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def SCREAMING_SNAKE_CASE ( ) -> Any:
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 44
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[Any] = torch.exp(_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.sum(_lowerCamelCase ,dim=1 ) # sum of exp(x_i)
_lowerCAmelCase : Dict = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44
| 1
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_a : int = False
class __A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : str = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase : Any = torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt="""first prompt""" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_lowerCAmelCase : str = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = generator.manual_seed(0 )
_lowerCAmelCase : Dict = pipe.dual_guided(
prompt="""first prompt""" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __A ( self ):
_lowerCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = """cyberpunk 2077"""
_lowerCAmelCase : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCAmelCase : Tuple = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_lowerCAmelCase : str = """A painting of a squirrel eating a burger """
_lowerCAmelCase : List[Any] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
_lowerCAmelCase : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_lowerCAmelCase : str = pipe.image_variation(a__ , generator=a__ , output_type="""numpy""" ).images
_lowerCAmelCase : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Optional[int] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 44
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : str = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , a__ = None , a__ = None , **a__ , ):
super().__init__(self , **a__ )
_lowerCAmelCase : Any = repo_info
_lowerCAmelCase : Optional[Any] = token
_lowerCAmelCase : Optional[int] = None
def __A ( self ):
if self.dir_cache is None:
_lowerCAmelCase : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCAmelCase : Any = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(a__ ): {"""name""": str(a__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self , a__ , a__ = "rb" , **a__ , ):
if not isinstance(self.repo_info , a__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_lowerCAmelCase : Tuple = hf_hub_url(self.repo_info.id , a__ , revision=self.repo_info.sha )
return fsspec.open(
a__ , mode=a__ , headers=get_authentication_headers_for_url(a__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __A ( self , a__ , **a__ ):
self._get_dirs()
_lowerCAmelCase : Union[str, Any] = self._strip_protocol(a__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(a__ )
def __A ( self , a__ , a__=False , **a__ ):
self._get_dirs()
_lowerCAmelCase : Any = PurePosixPath(path.strip("""/""" ) )
_lowerCAmelCase : List[str] = {}
for p, f in self.dir_cache.items():
_lowerCAmelCase : Any = PurePosixPath(p.strip("""/""" ) )
_lowerCAmelCase : Optional[int] = p.parent
if root == path:
_lowerCAmelCase : Dict = f
_lowerCAmelCase : Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 44
| 1
|
"""simple docstring"""
_a : Dict = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : Optional[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : Any = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 44
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
_UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCAmelCase : int = MultilingualCLIP(a__ )
_lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
| 44
| 1
|
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
_a : Optional[int] = None
_a : Optional[int] = {
'7B': 11_008,
'13B': 13_824,
'30B': 17_920,
'65B': 22_016,
'70B': 28_672,
}
_a : int = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Dict=1 ,_lowerCamelCase : Optional[int]=256 ) -> Optional[Any]:
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Any:
with open(_lowerCamelCase ,"""r""" ) as f:
return json.load(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Optional[Any] ) -> List[str]:
with open(_lowerCamelCase ,"""w""" ) as f:
json.dump(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict=True ) -> Dict:
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = os.path.join(_lowerCamelCase ,"""tmp""" )
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = read_json(os.path.join(_lowerCamelCase ,"""params.json""" ) )
_lowerCAmelCase : Optional[int] = NUM_SHARDS[model_size]
_lowerCAmelCase : int = params["""n_layers"""]
_lowerCAmelCase : str = params["""n_heads"""]
_lowerCAmelCase : Dict = n_heads // num_shards
_lowerCAmelCase : Tuple = params["""dim"""]
_lowerCAmelCase : Any = dim // n_heads
_lowerCAmelCase : Dict = 1_00_00.0
_lowerCAmelCase : Optional[int] = 1.0 / (base ** (torch.arange(0 ,_lowerCamelCase ,2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase : Any = params["""n_kv_heads"""] # for GQA / MQA
_lowerCAmelCase : int = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase : Optional[int] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase : List[str] = n_heads
_lowerCAmelCase : List[str] = n_heads_per_shard
_lowerCAmelCase : str = dim
# permute for sliced rotary
def permute(_lowerCamelCase : Dict ,_lowerCamelCase : int=n_heads ,_lowerCamelCase : List[str]=dim ,_lowerCamelCase : List[str]=dim ):
return w.view(_lowerCamelCase ,dima // n_heads // 2 ,2 ,_lowerCamelCase ).transpose(1 ,2 ).reshape(_lowerCamelCase ,_lowerCamelCase )
print(f"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : Any = torch.load(os.path.join(_lowerCamelCase ,"""consolidated.00.pth""" ) ,map_location="""cpu""" )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_lowerCamelCase ,f"consolidated.{i:02d}.pth" ) ,map_location="""cpu""" )
for i in range(_lowerCamelCase )
]
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Dict = {"""weight_map""": {}}
for layer_i in range(_lowerCamelCase ):
_lowerCAmelCase : int = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
_lowerCAmelCase : Optional[Any] = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"] ),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"] ),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
_lowerCAmelCase : Any = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for i in range(_lowerCamelCase )
] ,dim=0 ,).reshape(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for i in range(_lowerCamelCase )
] ,dim=0 ,).reshape(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,)
_lowerCAmelCase : List[Any] = torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for i in range(_lowerCamelCase )
] ,dim=0 ,).reshape(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(_lowerCamelCase )] ,dim=1 )
_lowerCAmelCase : Union[str, Any] = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(_lowerCamelCase )] ,dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(_lowerCamelCase )] ,dim=1 )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(_lowerCamelCase )] ,dim=0 )
_lowerCAmelCase : Optional[Any] = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Tuple = filename
param_count += v.numel()
torch.save(_lowerCamelCase ,os.path.join(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
_lowerCAmelCase : Any = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_lowerCAmelCase : str = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_lowerCamelCase )] ,dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_lowerCamelCase )] ,dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : Tuple = filename
param_count += v.numel()
torch.save(_lowerCamelCase ,os.path.join(_lowerCamelCase ,_lowerCamelCase ) )
# Write configs
_lowerCAmelCase : Optional[Any] = {"""total_size""": param_count * 2}
write_json(_lowerCamelCase ,os.path.join(_lowerCamelCase ,"""pytorch_model.bin.index.json""" ) )
_lowerCAmelCase : Any = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_lowerCAmelCase : Optional[Any] = params["""multiple_of"""] if """multiple_of""" in params else 256
_lowerCAmelCase : List[str] = LlamaConfig(
hidden_size=_lowerCamelCase ,intermediate_size=compute_intermediate_size(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) ,num_attention_heads=params["""n_heads"""] ,num_hidden_layers=params["""n_layers"""] ,rms_norm_eps=params["""norm_eps"""] ,num_key_value_heads=_lowerCamelCase ,)
config.save_pretrained(_lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
_lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained(_lowerCamelCase ,torch_dtype=torch.floataa ,low_cpu_mem_usage=_lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_lowerCamelCase ,safe_serialization=_lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Optional[int] ) -> Tuple:
# Initialize the tokenizer based on the `spm` model
_lowerCAmelCase : Optional[int] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}." )
_lowerCAmelCase : Optional[Any] = tokenizer_class(_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" ,help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" ,)
parser.add_argument(
"""--model_size""" ,choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] ,)
parser.add_argument(
"""--output_dir""" ,help="""Location to write HF model and tokenizer""" ,)
parser.add_argument("""--safe_serialization""" ,type=_lowerCamelCase ,help="""Whether or not to save using `safetensors`.""" )
_lowerCAmelCase : Any = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
_lowerCAmelCase : List[Any] = os.path.join(args.input_dir ,"""tokenizer.model""" )
write_tokenizer(args.output_dir ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
|
"""simple docstring"""
from math import ceil
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ) -> int:
_lowerCAmelCase : Dict = list(range(0 ,_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_lowerCAmelCase : Union[str, Any] = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCamelCase )
# Missing blocks
_lowerCAmelCase : int = [i for i in blocks if i not in device_map_blocks]
_lowerCAmelCase : List[Any] = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> str:
_lowerCAmelCase : Optional[Any] = list(range(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = int(ceil(n_layers / len(_lowerCamelCase ) ) )
_lowerCAmelCase : Optional[int] = [layers[i : i + n_blocks] for i in range(0 ,_lowerCamelCase ,_lowerCamelCase )]
return dict(zip(_lowerCamelCase ,_lowerCamelCase ) )
| 44
| 1
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[Any] = torch.exp(_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.sum(_lowerCamelCase ,dim=1 ) # sum of exp(x_i)
_lowerCAmelCase : Dict = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44
|
"""simple docstring"""
_a : List[str] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 44
| 1
|
"""simple docstring"""
_a : Tuple = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_a : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_a : Any = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_a : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_a : Optional[Any] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_a : Optional[int] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_a : Union[str, Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_a : int = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 44
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
| 44
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> int:
return DownloadCommand(args.model ,args.cache_dir ,args.force ,args.trust_remote_code )
class __A ( SCREAMING_SNAKE_CASE_ ):
@staticmethod
def __A ( a__ ):
_lowerCAmelCase : Optional[int] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=a__ , default=a__ , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=a__ , help="""Name of the model to download""" )
download_parser.set_defaults(func=a__ )
def __init__( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : str = model
_lowerCAmelCase : List[Any] = cache
_lowerCAmelCase : Tuple = force
_lowerCAmelCase : List[Any] = trust_remote_code
def __A ( self ):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 44
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
| 44
| 1
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Callable ,_lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> np.ndarray:
_lowerCAmelCase : List[str] = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : Dict = np.zeros((n + 1,) )
_lowerCAmelCase : List[Any] = ya
_lowerCAmelCase : int = xa
for k in range(_lowerCamelCase ):
_lowerCAmelCase : List[str] = y[k] + step_size * ode_func(_lowerCamelCase ,y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" ,type=_lowerCamelCase ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
parser.add_argument(
"""--num_beams""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) ,)
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--config_name""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Pretrained config name or path if not the same as model_name""" ,)
parser.add_argument(
"""--device""" ,type=_lowerCamelCase ,default="""cpu""" ,help="""Device where the model will be run""" ,)
parser.add_argument("""--output_file_path""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Where to store the final ONNX file.""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
| 1
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
def wrapper(*_lowerCamelCase : Any ,**_lowerCamelCase : List[str] ):
_lowerCAmelCase : Any = timeit.default_timer()
_lowerCAmelCase : str = func(*_lowerCamelCase ,**_lowerCamelCase )
_lowerCAmelCase : Tuple = timeit.default_timer() - starttime
return delta
_lowerCAmelCase : Union[str, Any] = func.__name__
return wrapper
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : dict ,_lowerCamelCase : Optional[Any]=100 ,_lowerCamelCase : Union[str, Any]=None ) -> Union[str, Any]:
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : str = seq_shapes or {}
for i in range(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_lowerCamelCase ,_ArrayXD ):
_lowerCAmelCase : Union[str, Any] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_lowerCamelCase ,datasets.Value ):
if v.dtype == "string":
_lowerCAmelCase : Union[str, Any] = """The small grey turtle was surprisingly fast when challenged."""
else:
_lowerCAmelCase : Union[str, Any] = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(_lowerCamelCase ,datasets.Sequence ):
while isinstance(_lowerCamelCase ,datasets.Sequence ):
_lowerCAmelCase : List[Any] = v.feature
_lowerCAmelCase : Optional[Any] = seq_shapes[k]
_lowerCAmelCase : Union[str, Any] = np.random.rand(*_lowerCamelCase ).astype(v.dtype )
_lowerCAmelCase : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Any ,_lowerCamelCase : str=100 ,_lowerCamelCase : List[str]=None ) -> List[str]:
_lowerCAmelCase : Dict = generate_examples(_lowerCamelCase ,num_examples=_lowerCamelCase ,seq_shapes=_lowerCamelCase )
with ArrowWriter(features=_lowerCamelCase ,path=_lowerCamelCase ) as writer:
for key, record in dummy_data:
_lowerCAmelCase : List[Any] = features.encode_example(_lowerCamelCase )
writer.write(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Dict = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
_lowerCAmelCase : Optional[Any] = datasets.Dataset.from_file(filename=_lowerCamelCase ,info=datasets.DatasetInfo(features=_lowerCamelCase ) )
return dataset
| 44
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]: # noqa: E741
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : str = [False] * n
_lowerCAmelCase : str = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Union[str, Any] = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : int = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Tuple = True
else:
_lowerCAmelCase : Union[str, Any] = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : List[str] = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
# Adjacency list of graph
_a : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 44
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = ProphetNetTokenizer
_UpperCamelCase : Any = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __A ( self , a__ ):
_lowerCAmelCase : str = """UNwant\u00E9d,running"""
_lowerCAmelCase : List[str] = """unwanted, running"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : Any = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase : List[str] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(a__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [9, 6, 7, 12, 10, 11] )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __A ( self ):
_lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __A ( self ):
_lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __A ( self ):
_lowerCAmelCase : List[str] = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = BasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __A ( self ):
_lowerCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __A ( self ):
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __A ( self ):
_lowerCAmelCase : str = BasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=a__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __A ( self ):
_lowerCAmelCase : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_lowerCAmelCase : Optional[int] = {}
for i, token in enumerate(a__ ):
_lowerCAmelCase : Optional[Any] = i
_lowerCAmelCase : str = WordpieceTokenizer(vocab=a__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def __A ( self ):
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
_lowerCAmelCase : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowerCAmelCase : Any = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
_lowerCAmelCase : Any = tokenizer(a__ , padding=a__ , return_tensors="""pt""" )
self.assertIsInstance(a__ , a__ )
_lowerCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(a__ , a__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __A ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __A ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __A ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def __A ( self ):
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
_lowerCAmelCase : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a__ )
_lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 44
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = XGLMTokenizer
_UpperCamelCase : List[Any] = XGLMTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
_lowerCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=a__ )
_lowerCAmelCase : List[str] = pickle.dumps(a__ )
pickle.loads(a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = """Hello World!"""
_lowerCAmelCase : Optional[int] = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/xglm-564M""" , padding=a__ , )
| 44
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : List[Any] = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_a : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44
|
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ) -> List[str]:
_lowerCAmelCase : Tuple = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[int] = height - k_size + 1
_lowerCAmelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(_lowerCamelCase ) ,range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : List[Any] = gen_gaussian_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = ravel(_lowerCamelCase )
# reshape and get the dst image
_lowerCAmelCase : int = dot(_lowerCamelCase ,_lowerCamelCase ).reshape(_lowerCamelCase ,_lowerCamelCase ).astype(_lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_a : Optional[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
_a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_a : Union[str, Any] = gaussian_filter(gray, 3, sigma=1)
_a : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
| 44
| 1
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : list[int] ,_lowerCamelCase : int ) -> list[int]:
_lowerCAmelCase : Optional[int] = [0] * no_of_processes
_lowerCAmelCase : Optional[Any] = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = burst_time[i]
_lowerCAmelCase : list[int] = []
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : int = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
_lowerCAmelCase : str = []
_lowerCAmelCase : List[str] = -1
for i in range(_lowerCamelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : Tuple = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
_lowerCAmelCase : Tuple = i
total_time += burst_time[target_process]
completed += 1
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Optional[int] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ,_lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Tuple = [0] * no_of_processes
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
_a : Dict = 4
_a : Optional[Any] = [2, 5, 3, 7]
_a : List[str] = [0, 0, 0, 0]
_a : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_a : str = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 44
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_a : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
_a : Optional[Any] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
_a : Any = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ElectraTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ):
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , )
_lowerCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , a__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , a__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , a__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(a__ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : int = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : Dict = tokenize_chinese_chars
_lowerCAmelCase : str = normalizer_class(**a__ )
_lowerCAmelCase : List[str] = do_lower_case
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
| 44
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> bool:
_lowerCAmelCase : Union[str, Any] = len(_lowerCamelCase ) + 1
_lowerCAmelCase : Any = len(_lowerCamelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_lowerCAmelCase : Union[str, Any] = [[0 for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
# since string of zero length match pattern of zero length
_lowerCAmelCase : Tuple = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 ,_lowerCamelCase ):
_lowerCAmelCase : List[str] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 ,_lowerCamelCase ):
_lowerCAmelCase : Any = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 ,_lowerCamelCase ):
for j in range(1 ,_lowerCamelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_lowerCAmelCase : int = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_lowerCAmelCase : Any = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_lowerCAmelCase : Optional[Any] = dp[i - 1][j]
else:
_lowerCAmelCase : Dict = 0
else:
_lowerCAmelCase : str = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_a : Tuple = 'aab'
_a : int = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 44
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_a : str = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_a : List[str] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_a : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def __A ( self , a__ , a__ , a__=False ):
if return_pvalue:
_lowerCAmelCase : List[Any] = pearsonr(a__ , a__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(a__ , a__ )[0] )}
| 44
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
while repunit:
_lowerCAmelCase : List[Any] = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000000 ) -> int:
_lowerCAmelCase : Optional[Any] = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 50 ) -> int:
_lowerCAmelCase : int = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44
| 1
|
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Iterable[str] ,_lowerCamelCase : int ) -> Generator[tuple[str, ...], None, None]:
_lowerCAmelCase : List[Any] = iter(_lowerCamelCase )
while True:
_lowerCAmelCase : Optional[Any] = tuple(itertools.islice(_lowerCamelCase ,_lowerCamelCase ) )
if not chunk:
return
yield chunk
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> str:
_lowerCAmelCase : str = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
_lowerCAmelCase : Dict = """"""
if len(_lowerCamelCase ) < 2:
return dirty
for i in range(len(_lowerCamelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(_lowerCamelCase ) & 1:
clean += "X"
return clean
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> list[str]:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
_lowerCAmelCase : Union[str, Any] = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_lowerCAmelCase : Tuple = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(_lowerCamelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(_lowerCamelCase )
return table
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> str:
_lowerCAmelCase : List[str] = generate_table(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = prepare_input(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowerCamelCase ,2 ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = divmod(table.index(_lowerCamelCase ) ,5 )
_lowerCAmelCase , _lowerCAmelCase : Dict = divmod(table.index(_lowerCamelCase ) ,5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> str:
_lowerCAmelCase : Union[str, Any] = generate_table(_lowerCamelCase )
_lowerCAmelCase : Dict = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowerCamelCase ,2 ):
_lowerCAmelCase , _lowerCAmelCase : Dict = divmod(table.index(_lowerCamelCase ) ,5 )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = divmod(table.index(_lowerCamelCase ) ,5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 44
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
_UpperCamelCase : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_UpperCamelCase : Optional[int] = "document_qa"
_UpperCamelCase : Any = AutoProcessor
_UpperCamelCase : Union[str, Any] = VisionEncoderDecoderModel
_UpperCamelCase : Union[str, Any] = ["image", "text"]
_UpperCamelCase : List[str] = ["text"]
def __init__( self , *a__ , **a__ ):
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*a__ , **a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[int] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_lowerCAmelCase : Dict = task_prompt.replace("""{user_input}""" , a__ )
_lowerCAmelCase : str = self.pre_processor.tokenizer(
a__ , add_special_tokens=a__ , return_tensors="""pt""" ).input_ids
_lowerCAmelCase : Dict = self.pre_processor(a__ , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __A ( self , a__ ):
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a__ , ).sequences
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = self.pre_processor.batch_decode(a__ )[0]
_lowerCAmelCase : int = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
_lowerCAmelCase : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
_lowerCAmelCase : List[str] = re.sub(r"""<.*?>""" , """""" , a__ , count=1 ).strip() # remove first task start token
_lowerCAmelCase : List[str] = self.pre_processor.tokenajson(a__ )
return sequence["answer"]
| 44
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = "WhisperFeatureExtractor"
_UpperCamelCase : List[Any] = "WhisperTokenizer"
def __init__( self , a__ , a__ ):
super().__init__(a__ , a__ )
_lowerCAmelCase : int = self.feature_extractor
_lowerCAmelCase : Union[str, Any] = False
def __A ( self , a__=None , a__=None , a__=True ):
return self.tokenizer.get_decoder_prompt_ids(task=a__ , language=a__ , no_timestamps=a__ )
def __call__( self , *a__ , **a__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a__ , **a__ )
_lowerCAmelCase : Union[str, Any] = kwargs.pop("""audio""" , a__ )
_lowerCAmelCase : Dict = kwargs.pop("""sampling_rate""" , a__ )
_lowerCAmelCase : Optional[Any] = kwargs.pop("""text""" , a__ )
if len(a__ ) > 0:
_lowerCAmelCase : Any = args[0]
_lowerCAmelCase : int = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
_lowerCAmelCase : str = self.feature_extractor(a__ , *a__ , sampling_rate=a__ , **a__ )
if text is not None:
_lowerCAmelCase : Any = self.tokenizer(a__ , **a__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCAmelCase : Any = encodings["""input_ids"""]
return inputs
def __A ( self , *a__ , **a__ ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.decode(*a__ , **a__ )
def __A ( self , a__ , a__="np" ):
return self.tokenizer.get_prompt_ids(a__ , return_tensors=a__ )
| 44
|
"""simple docstring"""
from __future__ import annotations
_a : List[str] = 10
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = max(_lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase : list[list] = [[] for _ in range(_lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(_lowerCamelCase )
# put each buckets' contents into list_of_ints
_lowerCAmelCase : List[str] = 0
for b in range(_lowerCamelCase ):
for i in buckets[b]:
_lowerCAmelCase : Any = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 1
|
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> Dict:
if hor == 128:
_lowerCAmelCase : Any = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
_lowerCAmelCase : Optional[int] = (32, 128, 256)
_lowerCAmelCase : Any = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
_lowerCAmelCase : Tuple = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
_lowerCAmelCase : int = (32, 64, 128, 256)
_lowerCAmelCase : List[Any] = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
_lowerCAmelCase : Tuple = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
_lowerCAmelCase : Union[str, Any] = model.state_dict()
_lowerCAmelCase : Dict = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 65536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
_lowerCAmelCase : Optional[int] = UNetaDModel(**_lowerCamelCase )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
_lowerCAmelCase : int = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowerCAmelCase : Optional[int] = state_dict.pop(_lowerCamelCase )
hf_value_function.load_state_dict(_lowerCamelCase )
torch.save(hf_value_function.state_dict() ,f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json" ,"""w""" ) as f:
json.dump(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
_lowerCAmelCase : List[str] = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 65536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
_lowerCAmelCase : Union[str, Any] = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
_lowerCAmelCase : Any = model
_lowerCAmelCase : int = UNetaDModel(**_lowerCamelCase )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
_lowerCAmelCase : Optional[Any] = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_lowerCAmelCase : Optional[int] = state_dict.pop(_lowerCamelCase )
hf_value_function.load_state_dict(_lowerCamelCase )
torch.save(hf_value_function.state_dict() ,"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" ,"""w""" ) as f:
json.dump(_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 44
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44
| 1
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--src_path""" ,type=_lowerCamelCase ,default="""biencoder-nq-dev.json""" ,help="""Path to raw DPR training data""" ,)
parser.add_argument(
"""--evaluation_set""" ,type=_lowerCamelCase ,help="""where to store parsed evaluation_set file""" ,)
parser.add_argument(
"""--gold_data_path""" ,type=_lowerCamelCase ,help="""where to store parsed gold_data_path file""" ,)
_lowerCAmelCase : Any = parser.parse_args()
with open(args.src_path ,"""r""" ) as src_file, open(args.evaluation_set ,"""w""" ) as eval_file, open(
args.gold_data_path ,"""w""" ) as gold_file:
_lowerCAmelCase : List[str] = json.load(_lowerCamelCase )
for dpr_record in tqdm(_lowerCamelCase ):
_lowerCAmelCase : Any = dpr_record["""question"""]
_lowerCAmelCase : int = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
eval_file.write(question + """\n""" )
gold_file.write("""\t""".join(_lowerCamelCase ) + """\n""" )
if __name__ == "__main__":
main()
| 44
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self , a__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = DebertaVaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_lowerCAmelCase : Dict = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 44
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = CycleDiffusionPipeline
_UpperCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
_UpperCamelCase : str = PipelineTesterMixin.required_optional_params - {"latents"}
_UpperCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
_UpperCamelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_lowerCAmelCase : str = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_lowerCAmelCase : Tuple = CLIPTextModel(a__ )
_lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : str = image / 2 + 0.5
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Any = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[int] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : List[str] = CycleDiffusionPipeline(**a__ )
_lowerCAmelCase : List[str] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : int = self.get_dummy_inputs(a__ )
_lowerCAmelCase : Optional[Any] = pipe(**a__ )
_lowerCAmelCase : Optional[Any] = output.images
_lowerCAmelCase : Optional[int] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_lowerCAmelCase : Dict = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __A ( self ):
_lowerCAmelCase : str = self.get_dummy_components()
for name, module in components.items():
if hasattr(a__ , """half""" ):
_lowerCAmelCase : Any = module.half()
_lowerCAmelCase : int = CycleDiffusionPipeline(**a__ )
_lowerCAmelCase : Dict = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = self.get_dummy_inputs(a__ )
_lowerCAmelCase : Optional[int] = pipe(**a__ )
_lowerCAmelCase : List[str] = output.images
_lowerCAmelCase : str = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_lowerCAmelCase : int = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __A ( self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def __A ( self ):
return super().test_inference_batch_single_identical()
@skip_mps
def __A ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __A ( self ):
return super().test_save_load_optional_components()
@skip_mps
def __A ( self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
_lowerCAmelCase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
_lowerCAmelCase : Optional[int] = init_image.resize((512, 512) )
_lowerCAmelCase : Optional[Any] = """CompVis/stable-diffusion-v1-4"""
_lowerCAmelCase : List[str] = DDIMScheduler.from_pretrained(a__ , subfolder="""scheduler""" )
_lowerCAmelCase : Tuple = CycleDiffusionPipeline.from_pretrained(
a__ , scheduler=a__ , safety_checker=a__ , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_lowerCAmelCase : Dict = """A black colored car"""
_lowerCAmelCase : str = """A blue colored car"""
_lowerCAmelCase : int = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(
prompt=a__ , source_prompt=a__ , image=a__ , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : int = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def __A ( self ):
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
_lowerCAmelCase : List[Any] = init_image.resize((512, 512) )
_lowerCAmelCase : Tuple = """CompVis/stable-diffusion-v1-4"""
_lowerCAmelCase : Dict = DDIMScheduler.from_pretrained(a__ , subfolder="""scheduler""" )
_lowerCAmelCase : Optional[int] = CycleDiffusionPipeline.from_pretrained(a__ , scheduler=a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_lowerCAmelCase : Union[str, Any] = """A black colored car"""
_lowerCAmelCase : Any = """A blue colored car"""
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : Dict = pipe(
prompt=a__ , source_prompt=a__ , image=a__ , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : Optional[int] = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 44
|
"""simple docstring"""
import numpy as np
import qiskit
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 8 ,_lowerCamelCase : int | None = None ) -> str:
_lowerCAmelCase : int = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_lowerCAmelCase : Tuple = 6 * key_len
# Measurement basis for Alice's qubits.
_lowerCAmelCase : Dict = rng.integers(2 ,size=_lowerCamelCase )
# The set of states Alice will prepare.
_lowerCAmelCase : Tuple = rng.integers(2 ,size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
_lowerCAmelCase : Union[str, Any] = rng.integers(2 ,size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
_lowerCAmelCase : Dict = qiskit.QuantumCircuit(_lowerCamelCase ,name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_lowerCAmelCase : int = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_lowerCAmelCase : List[str] = qiskit.execute(_lowerCamelCase ,_lowerCamelCase ,shots=1 ,seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
_lowerCAmelCase : List[Any] = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_lowerCAmelCase : str = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_lowerCAmelCase : List[Any] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase ,"""0""" )
return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 44
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : torch.FloatTensor
_UpperCamelCase : torch.FloatTensor
_UpperCamelCase : Optional[torch.FloatTensor] = None
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = 2
@register_to_config
def __init__( self , a__ = 0.0_2 , a__ = 100 , a__ = 1.0_0_7 , a__ = 80 , a__ = 0.0_5 , a__ = 50 , ):
# standard deviation of the initial noise distribution
_lowerCAmelCase : Union[str, Any] = sigma_max
# setable values
_lowerCAmelCase : int = None
_lowerCAmelCase : np.IntTensor = None
_lowerCAmelCase : torch.FloatTensor = None # sigma(t_i)
def __A ( self , a__ , a__ = None ):
return sample
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[int] = num_inference_steps
_lowerCAmelCase : Any = np.arange(0 , self.num_inference_steps )[::-1].copy()
_lowerCAmelCase : int = torch.from_numpy(a__ ).to(a__ )
_lowerCAmelCase : str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_lowerCAmelCase : Dict = torch.tensor(a__ , dtype=torch.floataa , device=a__ )
def __A ( self , a__ , a__ , a__ = None ):
if self.config.s_min <= sigma <= self.config.s_max:
_lowerCAmelCase : Union[str, Any] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_lowerCAmelCase : Union[str, Any] = 0
# sample eps ~ N(0, S_noise^2 * I)
_lowerCAmelCase : str = self.config.s_noise * randn_tensor(sample.shape , generator=a__ ).to(sample.device )
_lowerCAmelCase : Dict = sigma + gamma * sigma
_lowerCAmelCase : List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __A ( self , a__ , a__ , a__ , a__ , a__ = True , ):
_lowerCAmelCase : Union[str, Any] = sample_hat + sigma_hat * model_output
_lowerCAmelCase : List[str] = (sample_hat - pred_original_sample) / sigma_hat
_lowerCAmelCase : List[str] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=a__ , derivative=a__ , pred_original_sample=a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ = True , ):
_lowerCAmelCase : Tuple = sample_prev + sigma_prev * model_output
_lowerCAmelCase : str = (sample_prev - pred_original_sample) / sigma_prev
_lowerCAmelCase : Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=a__ , derivative=a__ , pred_original_sample=a__ )
def __A ( self , a__ , a__ , a__ ):
raise NotImplementedError()
| 44
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_a : Union[str, Any] = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_a : List[str] = 10
_a : List[Any] = 256
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[MinHash]:
if len(_lowerCamelCase ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase : Optional[Any] = MinHash(num_perm=_lowerCamelCase )
for token in set(_lowerCamelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(_lowerCamelCase ) if len(t.strip() ) > 0}
class __A :
def __init__( self , *,
a__ = 0.8_5 , ):
_lowerCAmelCase : List[Any] = duplication_jaccard_threshold
_lowerCAmelCase : Union[str, Any] = NUM_PERM
_lowerCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase : Optional[int] = defaultdict(a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self._index.query(a__ )
if code_key in self._index.keys:
print(F"Duplicate key {code_key}" )
return
self._index.insert(a__ , a__ )
if len(a__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(a__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(a__ )
def __A ( self ):
_lowerCAmelCase : int = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase : List[str] = [base] + list(a__ )
# reformat the cluster to be a list of dict
_lowerCAmelCase : List[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(a__ )
return duplicate_clusters
def __A ( self , a__ ):
_lowerCAmelCase : Dict = self.get_duplicate_clusters()
with open(a__ , """w""" ) as f:
json.dump(a__ , a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ) -> Optional[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash ,ThreadedIterator(_lowerCamelCase ,max_queue_size=10000 ) ,chunksize=100 ,):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float ) -> List[str]:
_lowerCAmelCase : Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=_lowerCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowerCamelCase ) ) ,max_queue_size=100 ) ):
di.add(_lowerCamelCase ,_lowerCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
_lowerCAmelCase : Any = get_tokens(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_tokens(_lowerCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_a : str = None
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : int = []
for elementa in cluster:
_lowerCAmelCase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
_lowerCAmelCase : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_lowerCamelCase ,_lowerCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase : Any = 1
extremes.append(_lowerCamelCase )
return extremes
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> str:
global _shared_dataset
_lowerCAmelCase : Tuple = dataset
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = partial(_find_cluster_extremes_shared ,jaccard_threshold=_lowerCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowerCamelCase ,_lowerCamelCase ,) ,total=len(_lowerCamelCase ) ,):
extremes_list.append(_lowerCamelCase )
return extremes_list
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase : Tuple = make_duplicate_clusters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Tuple = find_extremes(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : List[Any] = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase : List[Any] = dataset.filter(lambda _lowerCamelCase ,_lowerCamelCase : idx not in remove_indices ,with_indices=_lowerCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase : Tuple = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase : Dict = extreme_dict[element["""base_index"""]]["""copies"""]
print(f"Original dataset size: {len(_lowerCamelCase )}" )
print(f"Number of duplicate clusters: {len(_lowerCamelCase )}" )
print(f"Files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Unique files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Filtered dataset size: {len(_lowerCamelCase )}" )
return ds_filter, duplicate_clusters
| 44
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = tempfile.mkdtemp()
_lowerCAmelCase : str = SamImageProcessor()
_lowerCAmelCase : str = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self , **a__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase : Dict = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self ):
_lowerCAmelCase : str = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Optional[int] = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
_lowerCAmelCase : Dict = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.get_image_processor()
_lowerCAmelCase : List[str] = SamProcessor(image_processor=a__ )
_lowerCAmelCase : str = self.prepare_image_inputs()
_lowerCAmelCase : Any = image_processor(a__ , return_tensors="""np""" )
_lowerCAmelCase : Dict = processor(images=a__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def __A ( self ):
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = SamProcessor(image_processor=a__ )
_lowerCAmelCase : Union[str, Any] = [torch.ones((1, 3, 5, 5) )]
_lowerCAmelCase : Optional[int] = [[1764, 2646]]
_lowerCAmelCase : Optional[Any] = [[683, 1024]]
_lowerCAmelCase : Union[str, Any] = processor.post_process_masks(a__ , a__ , a__ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_lowerCAmelCase : int = processor.post_process_masks(
a__ , torch.tensor(a__ ) , torch.tensor(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_lowerCAmelCase : int = [np.ones((1, 3, 5, 5) )]
_lowerCAmelCase : Optional[int] = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_lowerCAmelCase : Union[str, Any] = [[1, 0], [0, 1]]
with self.assertRaises(a__ ):
_lowerCAmelCase : Any = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
@require_vision
@require_tf
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Any = SamImageProcessor()
_lowerCAmelCase : Dict = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self , **a__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self ):
_lowerCAmelCase : List[str] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
_lowerCAmelCase : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def __A ( self ):
_lowerCAmelCase : int = self.get_image_processor()
_lowerCAmelCase : List[Any] = SamProcessor(image_processor=a__ )
_lowerCAmelCase : int = self.prepare_image_inputs()
_lowerCAmelCase : Optional[int] = image_processor(a__ , return_tensors="""np""" )
_lowerCAmelCase : str = processor(images=a__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def __A ( self ):
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : List[Any] = SamProcessor(image_processor=a__ )
_lowerCAmelCase : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
_lowerCAmelCase : Tuple = [[1764, 2646]]
_lowerCAmelCase : int = [[683, 1024]]
_lowerCAmelCase : List[str] = processor.post_process_masks(a__ , a__ , a__ , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_lowerCAmelCase : Optional[Any] = processor.post_process_masks(
a__ , tf.convert_to_tensor(a__ ) , tf.convert_to_tensor(a__ ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_lowerCAmelCase : Dict = [np.ones((1, 3, 5, 5) )]
_lowerCAmelCase : List[Any] = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_lowerCAmelCase : str = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_lowerCAmelCase : Dict = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Dict = tempfile.mkdtemp()
_lowerCAmelCase : int = SamImageProcessor()
_lowerCAmelCase : List[str] = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self , **a__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase : str = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __A ( self ):
_lowerCAmelCase : Tuple = self.get_image_processor()
_lowerCAmelCase : List[str] = SamProcessor(image_processor=a__ )
_lowerCAmelCase : int = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
_lowerCAmelCase : Optional[Any] = [tf.convert_to_tensor(a__ )]
_lowerCAmelCase : Optional[Any] = [torch.tensor(a__ )]
_lowerCAmelCase : Optional[int] = [[1764, 2646]]
_lowerCAmelCase : Union[str, Any] = [[683, 1024]]
_lowerCAmelCase : int = processor.post_process_masks(
a__ , a__ , a__ , return_tensors="""tf""" )
_lowerCAmelCase : Optional[Any] = processor.post_process_masks(
a__ , a__ , a__ , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __A ( self ):
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Optional[int] = SamProcessor(image_processor=a__ )
_lowerCAmelCase : Any = self.prepare_image_inputs()
_lowerCAmelCase : List[Any] = image_processor(a__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_lowerCAmelCase : List[str] = processor(images=a__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_lowerCAmelCase : int = image_processor(a__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
_lowerCAmelCase : Tuple = processor(images=a__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
| 44
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = "swinv2"
_UpperCamelCase : List[str] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a__=224 , a__=4 , a__=3 , a__=96 , a__=[2, 2, 6, 2] , a__=[3, 6, 12, 24] , a__=7 , a__=4.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=0.0_2 , a__=1e-5 , a__=32 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[Any] = embed_dim
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : List[Any] = len(a__ )
_lowerCAmelCase : Any = num_heads
_lowerCAmelCase : Tuple = window_size
_lowerCAmelCase : Tuple = mlp_ratio
_lowerCAmelCase : Any = qkv_bias
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : str = drop_path_rate
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[str] = use_absolute_embeddings
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Any = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(a__ ) - 1) )
_lowerCAmelCase : Tuple = (0, 0, 0, 0)
| 44
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_a : Optional[Any] = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = ['DPTFeatureExtractor']
_a : int = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = """en_speaker_1"""
_lowerCAmelCase : List[Any] = """This is a test string"""
_lowerCAmelCase : Any = """speaker_embeddings_path.json"""
_lowerCAmelCase : List[Any] = """speaker_embeddings"""
def __A ( self , **a__ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
_lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : Dict = {
"""semantic_prompt""": np.ones(a__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(a__ , **a__ )
_lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
_lowerCAmelCase : Dict = processor(text=self.input_string )
_lowerCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> bool:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(_lowerCamelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(_lowerCamelCase ) == 1:
return True
_lowerCAmelCase : Dict = series[1] - series[0]
for index in range(len(_lowerCamelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> float:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(_lowerCamelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
_lowerCAmelCase : Union[str, Any] = 0
for val in series:
answer += val
return answer / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[Any] = torch.exp(_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.sum(_lowerCamelCase ,dim=1 ) # sum of exp(x_i)
_lowerCAmelCase : Dict = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44
| 1
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_a : Union[str, Any] = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_a : List[str] = 10
_a : List[Any] = 256
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[MinHash]:
if len(_lowerCamelCase ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase : Optional[Any] = MinHash(num_perm=_lowerCamelCase )
for token in set(_lowerCamelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(_lowerCamelCase ) if len(t.strip() ) > 0}
class __A :
def __init__( self , *,
a__ = 0.8_5 , ):
_lowerCAmelCase : List[Any] = duplication_jaccard_threshold
_lowerCAmelCase : Union[str, Any] = NUM_PERM
_lowerCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase : Optional[int] = defaultdict(a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self._index.query(a__ )
if code_key in self._index.keys:
print(F"Duplicate key {code_key}" )
return
self._index.insert(a__ , a__ )
if len(a__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(a__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(a__ )
def __A ( self ):
_lowerCAmelCase : int = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase : List[str] = [base] + list(a__ )
# reformat the cluster to be a list of dict
_lowerCAmelCase : List[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(a__ )
return duplicate_clusters
def __A ( self , a__ ):
_lowerCAmelCase : Dict = self.get_duplicate_clusters()
with open(a__ , """w""" ) as f:
json.dump(a__ , a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ) -> Optional[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash ,ThreadedIterator(_lowerCamelCase ,max_queue_size=10000 ) ,chunksize=100 ,):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float ) -> List[str]:
_lowerCAmelCase : Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=_lowerCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowerCamelCase ) ) ,max_queue_size=100 ) ):
di.add(_lowerCamelCase ,_lowerCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
_lowerCAmelCase : Any = get_tokens(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_tokens(_lowerCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_a : str = None
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : int = []
for elementa in cluster:
_lowerCAmelCase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
_lowerCAmelCase : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_lowerCamelCase ,_lowerCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase : Any = 1
extremes.append(_lowerCamelCase )
return extremes
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> str:
global _shared_dataset
_lowerCAmelCase : Tuple = dataset
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = partial(_find_cluster_extremes_shared ,jaccard_threshold=_lowerCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowerCamelCase ,_lowerCamelCase ,) ,total=len(_lowerCamelCase ) ,):
extremes_list.append(_lowerCamelCase )
return extremes_list
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase : Tuple = make_duplicate_clusters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Tuple = find_extremes(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : List[Any] = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase : List[Any] = dataset.filter(lambda _lowerCamelCase ,_lowerCamelCase : idx not in remove_indices ,with_indices=_lowerCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase : Tuple = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase : Dict = extreme_dict[element["""base_index"""]]["""copies"""]
print(f"Original dataset size: {len(_lowerCamelCase )}" )
print(f"Number of duplicate clusters: {len(_lowerCamelCase )}" )
print(f"Files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Unique files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Filtered dataset size: {len(_lowerCamelCase )}" )
return ds_filter, duplicate_clusters
| 44
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : str = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , a__ = None , a__ = None , **a__ , ):
super().__init__(self , **a__ )
_lowerCAmelCase : Any = repo_info
_lowerCAmelCase : Optional[Any] = token
_lowerCAmelCase : Optional[int] = None
def __A ( self ):
if self.dir_cache is None:
_lowerCAmelCase : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCAmelCase : Any = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(a__ ): {"""name""": str(a__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self , a__ , a__ = "rb" , **a__ , ):
if not isinstance(self.repo_info , a__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_lowerCAmelCase : Tuple = hf_hub_url(self.repo_info.id , a__ , revision=self.repo_info.sha )
return fsspec.open(
a__ , mode=a__ , headers=get_authentication_headers_for_url(a__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __A ( self , a__ , **a__ ):
self._get_dirs()
_lowerCAmelCase : Union[str, Any] = self._strip_protocol(a__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(a__ )
def __A ( self , a__ , a__=False , **a__ ):
self._get_dirs()
_lowerCAmelCase : Any = PurePosixPath(path.strip("""/""" ) )
_lowerCAmelCase : List[str] = {}
for p, f in self.dir_cache.items():
_lowerCAmelCase : Any = PurePosixPath(p.strip("""/""" ) )
_lowerCAmelCase : Optional[int] = p.parent
if root == path:
_lowerCAmelCase : Dict = f
_lowerCAmelCase : Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 44
| 1
|
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
if (
not isinstance(_lowerCamelCase ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
if (
not isinstance(_lowerCamelCase ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
_UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCAmelCase : int = MultilingualCLIP(a__ )
_lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
| 44
| 1
|
"""simple docstring"""
from functools import lru_cache
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> set:
_lowerCAmelCase : Optional[int] = 2
_lowerCAmelCase : List[Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_lowerCamelCase )
if n > 1:
factors.add(_lowerCamelCase )
return factors
@lru_cache
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int:
return len(unique_prime_factors(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> bool:
return len(set(_lowerCamelCase ) ) in (0, 1)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> list:
_lowerCAmelCase : Any = 2
while True:
# Increment each value of a generated range
_lowerCAmelCase : str = [base + i for i in range(_lowerCamelCase )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
_lowerCAmelCase : List[str] = [upf_len(_lowerCamelCase ) for x in group]
checker.append(_lowerCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(_lowerCamelCase ):
return group
# Increment our base variable by 1
base += 1
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 4 ) -> int:
_lowerCAmelCase : Optional[Any] = run(_lowerCamelCase )
return results[0] if len(_lowerCamelCase ) else None
if __name__ == "__main__":
print(solution())
| 44
|
"""simple docstring"""
from math import ceil
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ) -> int:
_lowerCAmelCase : Dict = list(range(0 ,_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_lowerCAmelCase : Union[str, Any] = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCamelCase )
# Missing blocks
_lowerCAmelCase : int = [i for i in blocks if i not in device_map_blocks]
_lowerCAmelCase : List[Any] = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> str:
_lowerCAmelCase : Optional[Any] = list(range(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = int(ceil(n_layers / len(_lowerCamelCase ) ) )
_lowerCAmelCase : Optional[int] = [layers[i : i + n_blocks] for i in range(0 ,_lowerCamelCase ,_lowerCamelCase )]
return dict(zip(_lowerCamelCase ,_lowerCamelCase ) )
| 44
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_a : List[Any] = ['gpt2']
_a : List[str] = 'gpt2'
if is_tf_available():
class __A ( tf.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = tokenizer
_lowerCAmelCase : Tuple = AutoConfig.from_pretrained(a__ )
_lowerCAmelCase : Union[str, Any] = TFGPTaLMHeadModel.from_config(a__ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ )
_lowerCAmelCase : List[str] = tokenized["""input_ids"""].to_tensor()
_lowerCAmelCase : Union[str, Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_lowerCAmelCase : List[str] = self.model(input_ids=a__ , attention_mask=a__ )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class __A ( unittest.TestCase ):
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [GPTaTokenizer.from_pretrained(a__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_lowerCAmelCase : str = [TFGPTaTokenizer.from_pretrained(a__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_lowerCAmelCase : str = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
_lowerCAmelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def __A ( self ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_lowerCAmelCase : Dict = tokenizer([test_inputs] , return_tensors="""tf""" )
_lowerCAmelCase : Dict = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_lowerCAmelCase : List[str] = python_outputs[key].numpy()
_lowerCAmelCase : Union[str, Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(a__ , tf.intaa ) == tf_outputs_values ) )
@slow
def __A ( self ):
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Tuple = tf.function(a__ )
for test_inputs in self.test_sentences:
_lowerCAmelCase : Optional[Any] = tf.constant(a__ )
_lowerCAmelCase : Dict = compiled_tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tf_tokenizer(a__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def __A ( self ):
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Tuple = ModelToSave(tokenizer=a__ )
_lowerCAmelCase : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_lowerCAmelCase : List[str] = model.serving(a__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCAmelCase : str = Path(a__ ) / """saved.model"""
tf.saved_model.save(a__ , a__ , signatures={"""serving_default""": model.serving} )
_lowerCAmelCase : Union[str, Any] = tf.saved_model.load(a__ )
_lowerCAmelCase : Any = loaded_model.signatures["""serving_default"""](a__ )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def __A ( self ):
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : List[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_lowerCAmelCase : str = tf_tokenizer(a__ ) # Build model with some sample inputs
_lowerCAmelCase : Optional[int] = tf_tokenizer.get_config()
_lowerCAmelCase : List[str] = TFGPTaTokenizer.from_config(a__ )
_lowerCAmelCase : Any = model_from_config(a__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def __A ( self ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_lowerCAmelCase : List[str] = 123123
for max_length in [3, 5, 1024]:
_lowerCAmelCase : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_lowerCAmelCase : List[str] = tf_tokenizer(a__ , max_length=a__ )
_lowerCAmelCase : Union[str, Any] = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length
| 44
|
"""simple docstring"""
_a : List[str] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 44
| 1
|
"""simple docstring"""
_a : Dict = 0 # The first color of the flag.
_a : Union[str, Any] = 1 # The second color of the flag.
_a : Dict = 2 # The third color of the flag.
_a : Union[str, Any] = (red, white, blue)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> list:
if not sequence:
return []
if len(_lowerCamelCase ) == 1:
return list(_lowerCamelCase )
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : Union[str, Any] = len(_lowerCamelCase ) - 1
_lowerCAmelCase : Tuple = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCAmelCase , _lowerCAmelCase : Any = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCAmelCase , _lowerCAmelCase : Any = sequence[high], sequence[mid]
high -= 1
else:
_lowerCAmelCase : Dict = f"The elements inside the sequence must contains only {colors} values"
raise ValueError(_lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : int = input('Enter numbers separated by commas:\n').strip()
_a : Any = [int(item.strip()) for item in user_input.split(',')]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 44
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
| 44
| 1
|
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ) -> List[str]:
_lowerCAmelCase : Tuple = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[int] = height - k_size + 1
_lowerCAmelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(_lowerCamelCase ) ,range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : List[Any] = gen_gaussian_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = ravel(_lowerCamelCase )
# reshape and get the dst image
_lowerCAmelCase : int = dot(_lowerCamelCase ,_lowerCamelCase ).reshape(_lowerCamelCase ,_lowerCamelCase ).astype(_lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_a : Optional[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
_a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_a : Union[str, Any] = gaussian_filter(gray, 3, sigma=1)
_a : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
| 44
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
| 44
| 1
|
"""simple docstring"""
import itertools
import math
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(_lowerCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_lowerCAmelCase : Any = 2
while True:
if is_prime(_lowerCamelCase ):
yield num
num += 1
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 10001 ) -> int:
return next(itertools.islice(prime_generator() ,nth - 1 ,_lowerCamelCase ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" ,type=_lowerCamelCase ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
parser.add_argument(
"""--num_beams""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) ,)
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--config_name""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Pretrained config name or path if not the same as model_name""" ,)
parser.add_argument(
"""--device""" ,type=_lowerCamelCase ,default="""cpu""" ,help="""Device where the model will be run""" ,)
parser.add_argument("""--output_file_path""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Where to store the final ONNX file.""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = DanceDiffusionPipeline
_UpperCamelCase : int = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCamelCase : List[Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[int] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a__ , use_timestep_embedding=a__ , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
_lowerCAmelCase : List[str] = IPNDMScheduler()
_lowerCAmelCase : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Dict = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def __A ( self ):
_lowerCAmelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : Union[str, Any] = DanceDiffusionPipeline(**a__ )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(a__ )
_lowerCAmelCase : Optional[Any] = pipe(**a__ )
_lowerCAmelCase : int = output.audios
_lowerCAmelCase : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Dict = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __A ( self ):
return super().test_save_load_local()
@skip_mps
def __A ( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def __A ( self ):
return super().test_save_load_optional_components()
@skip_mps
def __A ( self ):
return super().test_attention_slicing_forward_pass()
def __A ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = torch_device
_lowerCAmelCase : List[str] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = pipe(generator=a__ , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Dict = output.audios
_lowerCAmelCase : Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : int = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
_lowerCAmelCase : List[str] = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
_lowerCAmelCase : str = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=a__ , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Any = output.audios
_lowerCAmelCase : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Tuple = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 44
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]: # noqa: E741
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : str = [False] * n
_lowerCAmelCase : str = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Union[str, Any] = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : int = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Tuple = True
else:
_lowerCAmelCase : Union[str, Any] = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : List[str] = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
# Adjacency list of graph
_a : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 44
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
_UpperCamelCase : ClassVar[Features] = Features({"image": Image()} )
_UpperCamelCase : ClassVar[Features] = Features({"labels": ClassLabel} )
_UpperCamelCase : str = "image"
_UpperCamelCase : str = "labels"
def __A ( self , a__ ):
if self.label_column not in features:
raise ValueError(F"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , a__ ):
raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
_lowerCAmelCase : str = copy.deepcopy(self )
_lowerCAmelCase : Dict = self.label_schema.copy()
_lowerCAmelCase : str = features[self.label_column]
_lowerCAmelCase : Optional[Any] = label_schema
return task_template
@property
def __A ( self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 44
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = XGLMTokenizer
_UpperCamelCase : List[Any] = XGLMTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
_lowerCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=a__ )
_lowerCAmelCase : List[str] = pickle.dumps(a__ )
pickle.loads(a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = """Hello World!"""
_lowerCAmelCase : Optional[int] = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/xglm-564M""" , padding=a__ , )
| 44
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : List[Any] = logging.get_logger(__name__)
_a : Optional[int] = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = "dinat"
_UpperCamelCase : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a__=4 , a__=3 , a__=64 , a__=[3, 4, 6, 5] , a__=[2, 4, 8, 16] , a__=7 , a__=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , a__=3.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=0.0_2 , a__=1e-5 , a__=0.0 , a__=None , a__=None , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : List[Any] = embed_dim
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Tuple = len(a__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : List[str] = kernel_size
_lowerCAmelCase : Any = dilations
_lowerCAmelCase : List[Any] = mlp_ratio
_lowerCAmelCase : List[Any] = qkv_bias
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = drop_path_rate
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(a__ ) - 1) )
_lowerCAmelCase : Optional[int] = layer_scale_init_value
_lowerCAmelCase : List[str] = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(a__ ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : Dict = get_aligned_output_features_output_indices(
out_features=a__ , out_indices=a__ , stage_names=self.stage_names )
| 44
|
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ) -> List[str]:
_lowerCAmelCase : Tuple = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[int] = height - k_size + 1
_lowerCAmelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(_lowerCamelCase ) ,range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : List[Any] = gen_gaussian_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = ravel(_lowerCamelCase )
# reshape and get the dst image
_lowerCAmelCase : int = dot(_lowerCamelCase ,_lowerCamelCase ).reshape(_lowerCamelCase ,_lowerCamelCase ).astype(_lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_a : Optional[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
_a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_a : Union[str, Any] = gaussian_filter(gray, 3, sigma=1)
_a : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
| 44
| 1
|
"""simple docstring"""
class __A :
def __init__( self ):
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : List[str] = []
def __A ( self , a__ , a__ ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
_lowerCAmelCase : str = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
_lowerCAmelCase : Dict = self.__min_dist_top_down_dp(a__ , n - 1 )
_lowerCAmelCase : int = self.__min_dist_top_down_dp(m - 1 , a__ )
_lowerCAmelCase : str = self.__min_dist_top_down_dp(m - 1 , n - 1 )
_lowerCAmelCase : Optional[int] = 1 + min(a__ , a__ , a__ )
return self.dp[m][n]
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = worda
_lowerCAmelCase : Any = worda
_lowerCAmelCase : List[Any] = [[-1 for _ in range(len(a__ ) )] for _ in range(len(a__ ) )]
return self.__min_dist_top_down_dp(len(a__ ) - 1 , len(a__ ) - 1 )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Dict = worda
_lowerCAmelCase : Union[str, Any] = worda
_lowerCAmelCase : Optional[Any] = len(a__ )
_lowerCAmelCase : Union[str, Any] = len(a__ )
_lowerCAmelCase : Tuple = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
_lowerCAmelCase : Any = j
elif j == 0: # second string is empty
_lowerCAmelCase : Tuple = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
_lowerCAmelCase : int = self.dp[i - 1][j - 1]
else:
_lowerCAmelCase : List[str] = self.dp[i][j - 1]
_lowerCAmelCase : Tuple = self.dp[i - 1][j]
_lowerCAmelCase : Dict = self.dp[i - 1][j - 1]
_lowerCAmelCase : Optional[int] = 1 + min(a__ , a__ , a__ )
return self.dp[m][n]
if __name__ == "__main__":
_a : List[str] = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
_a : List[Any] = input('Enter the first string: ').strip()
_a : List[Any] = input('Enter the second string: ').strip()
print()
print(F"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(F"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 44
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_a : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
_a : Optional[Any] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
_a : Any = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ElectraTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ):
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , )
_lowerCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , a__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , a__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , a__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(a__ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : int = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : Dict = tokenize_chinese_chars
_lowerCAmelCase : str = normalizer_class(**a__ )
_lowerCAmelCase : List[str] = do_lower_case
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
| 44
| 1
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : str = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , a__ = None , a__ = None , **a__ , ):
super().__init__(self , **a__ )
_lowerCAmelCase : Any = repo_info
_lowerCAmelCase : Optional[Any] = token
_lowerCAmelCase : Optional[int] = None
def __A ( self ):
if self.dir_cache is None:
_lowerCAmelCase : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCAmelCase : Any = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(a__ ): {"""name""": str(a__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self , a__ , a__ = "rb" , **a__ , ):
if not isinstance(self.repo_info , a__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_lowerCAmelCase : Tuple = hf_hub_url(self.repo_info.id , a__ , revision=self.repo_info.sha )
return fsspec.open(
a__ , mode=a__ , headers=get_authentication_headers_for_url(a__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __A ( self , a__ , **a__ ):
self._get_dirs()
_lowerCAmelCase : Union[str, Any] = self._strip_protocol(a__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(a__ )
def __A ( self , a__ , a__=False , **a__ ):
self._get_dirs()
_lowerCAmelCase : Any = PurePosixPath(path.strip("""/""" ) )
_lowerCAmelCase : List[str] = {}
for p, f in self.dir_cache.items():
_lowerCAmelCase : Any = PurePosixPath(p.strip("""/""" ) )
_lowerCAmelCase : Optional[int] = p.parent
if root == path:
_lowerCAmelCase : Dict = f
_lowerCAmelCase : Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 44
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_a : str = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_a : List[str] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_a : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def __A ( self , a__ , a__ , a__=False ):
if return_pvalue:
_lowerCAmelCase : List[Any] = pearsonr(a__ , a__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(a__ , a__ )[0] )}
| 44
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = "gptj"
_UpperCamelCase : int = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , a__=50400 , a__=2048 , a__=4096 , a__=28 , a__=16 , a__=64 , a__=None , a__="gelu_new" , a__=0.0 , a__=0.0 , a__=0.0 , a__=1e-5 , a__=0.0_2 , a__=True , a__=50256 , a__=50256 , a__=False , **a__ , ):
_lowerCAmelCase : Union[str, Any] = vocab_size
_lowerCAmelCase : Dict = n_positions
_lowerCAmelCase : Optional[int] = n_embd
_lowerCAmelCase : str = n_layer
_lowerCAmelCase : Optional[Any] = n_head
_lowerCAmelCase : int = n_inner
_lowerCAmelCase : int = rotary_dim
_lowerCAmelCase : str = activation_function
_lowerCAmelCase : Dict = resid_pdrop
_lowerCAmelCase : str = embd_pdrop
_lowerCAmelCase : int = attn_pdrop
_lowerCAmelCase : Optional[int] = layer_norm_epsilon
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Dict = use_cache
_lowerCAmelCase : Dict = bos_token_id
_lowerCAmelCase : str = eos_token_id
super().__init__(
bos_token_id=a__ , eos_token_id=a__ , tie_word_embeddings=a__ , **a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ = "default" , a__ = None , a__ = False , ):
super().__init__(a__ , task=a__ , patching_specs=a__ , use_past=a__ )
if not getattr(self._config , """pad_token_id""" , a__ ):
# TODO: how to do that better?
_lowerCAmelCase : List[Any] = 0
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(a__ , direction="""inputs""" )
_lowerCAmelCase : Any = {0: """batch""", 1: """past_sequence + sequence"""}
else:
_lowerCAmelCase : Any = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __A ( self ):
return self._config.n_layer
@property
def __A ( self ):
return self._config.n_head
def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ):
_lowerCAmelCase : str = super(a__ , self ).generate_dummy_inputs(
a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
# We need to order the input in the way they appears in the forward()
_lowerCAmelCase : Any = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Dict = seqlen + 2
_lowerCAmelCase : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCAmelCase : Optional[Any] = [
(torch.zeros(a__ ), torch.zeros(a__ )) for _ in range(self.num_layers )
]
_lowerCAmelCase : List[str] = common_inputs["""attention_mask"""]
if self.use_past:
_lowerCAmelCase : Tuple = ordered_inputs["""attention_mask"""].dtype
_lowerCAmelCase : List[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(a__ , a__ , dtype=a__ )] , dim=1 )
return ordered_inputs
@property
def __A ( self ):
return 13
| 44
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 50 ) -> int:
_lowerCAmelCase : int = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44
| 1
|
"""simple docstring"""
from __future__ import annotations
class __A :
def __init__( self , a__=None ):
_lowerCAmelCase : Optional[Any] = data
_lowerCAmelCase : Dict = None
def __repr__( self ):
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : int = self
while temp:
string_rep.append(F"{temp.data}" )
_lowerCAmelCase : str = temp.next
return "->".join(a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> List[Any]:
if not elements_list:
raise Exception("""The Elements List is empty""" )
_lowerCAmelCase : Dict = Node(elements_list[0] )
for i in range(1 ,len(_lowerCamelCase ) ):
_lowerCAmelCase : Tuple = Node(elements_list[i] )
_lowerCAmelCase : List[Any] = current.next
return head
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node ) -> None:
if head_node is not None and isinstance(_lowerCamelCase ,_lowerCamelCase ):
print_reverse(head_node.next )
print(head_node.data )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
from doctest import testmod
testmod()
_lowerCAmelCase : Dict = make_linked_list([14, 52, 14, 12, 43] )
print("""Linked List:""" )
print(_lowerCamelCase )
print("""Elements in Reverse:""" )
print_reverse(_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
_UpperCamelCase : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_UpperCamelCase : Optional[int] = "document_qa"
_UpperCamelCase : Any = AutoProcessor
_UpperCamelCase : Union[str, Any] = VisionEncoderDecoderModel
_UpperCamelCase : Union[str, Any] = ["image", "text"]
_UpperCamelCase : List[str] = ["text"]
def __init__( self , *a__ , **a__ ):
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*a__ , **a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[int] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_lowerCAmelCase : Dict = task_prompt.replace("""{user_input}""" , a__ )
_lowerCAmelCase : str = self.pre_processor.tokenizer(
a__ , add_special_tokens=a__ , return_tensors="""pt""" ).input_ids
_lowerCAmelCase : Dict = self.pre_processor(a__ , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __A ( self , a__ ):
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a__ , ).sequences
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = self.pre_processor.batch_decode(a__ )[0]
_lowerCAmelCase : int = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
_lowerCAmelCase : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
_lowerCAmelCase : List[str] = re.sub(r"""<.*?>""" , """""" , a__ , count=1 ).strip() # remove first task start token
_lowerCAmelCase : List[str] = self.pre_processor.tokenajson(a__ )
return sequence["answer"]
| 44
| 1
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __A ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
_UpperCamelCase : Dict = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
if os.name == "nt":
_lowerCAmelCase : Optional[int] = CursorInfo()
_lowerCAmelCase : Dict = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_lowerCamelCase ,ctypes.byref(_lowerCamelCase ) )
_lowerCAmelCase : str = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_lowerCamelCase ,ctypes.byref(_lowerCamelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
if os.name == "nt":
_lowerCAmelCase : Tuple = CursorInfo()
_lowerCAmelCase : Any = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_lowerCamelCase ,ctypes.byref(_lowerCamelCase ) )
_lowerCAmelCase : Dict = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_lowerCamelCase ,ctypes.byref(_lowerCamelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
try:
hide_cursor()
yield
finally:
show_cursor()
| 44
|
"""simple docstring"""
from __future__ import annotations
_a : List[str] = 10
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = max(_lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase : list[list] = [[] for _ in range(_lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(_lowerCamelCase )
# put each buckets' contents into list_of_ints
_lowerCAmelCase : List[str] = 0
for b in range(_lowerCamelCase ):
for i in buckets[b]:
_lowerCAmelCase : Any = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 1
|
"""simple docstring"""
from math import factorial
_a : str = {str(d): factorial(d) for d in range(10)}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int:
return sum(DIGIT_FACTORIAL[d] for d in str(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = 7 * factorial(9 ) + 1
return sum(i for i in range(3 ,_lowerCamelCase ) if sum_of_digit_factorial(_lowerCamelCase ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 44
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Dict = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self , a__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = DebertaVaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_lowerCAmelCase : Dict = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 44
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ) -> bool:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_lowerCAmelCase : Optional[int] = True
# sum is not zero and set is empty then false
for i in range(1 ,required_sum + 1 ):
_lowerCAmelCase : List[str] = False
for i in range(1 ,arr_len + 1 ):
for j in range(1 ,required_sum + 1 ):
if arr[i - 1] > j:
_lowerCAmelCase : Optional[Any] = subset[i - 1][j]
if arr[i - 1] <= j:
_lowerCAmelCase : Any = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
"""simple docstring"""
import numpy as np
import qiskit
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 8 ,_lowerCamelCase : int | None = None ) -> str:
_lowerCAmelCase : int = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_lowerCAmelCase : Tuple = 6 * key_len
# Measurement basis for Alice's qubits.
_lowerCAmelCase : Dict = rng.integers(2 ,size=_lowerCamelCase )
# The set of states Alice will prepare.
_lowerCAmelCase : Tuple = rng.integers(2 ,size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
_lowerCAmelCase : Union[str, Any] = rng.integers(2 ,size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
_lowerCAmelCase : Dict = qiskit.QuantumCircuit(_lowerCamelCase ,name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_lowerCAmelCase : int = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_lowerCAmelCase : List[str] = qiskit.execute(_lowerCamelCase ,_lowerCamelCase ,shots=1 ,seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
_lowerCAmelCase : List[Any] = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_lowerCAmelCase : str = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_lowerCAmelCase : List[Any] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase ,"""0""" )
return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 44
| 1
|
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_a : Any = '\nimport os\n'
_a : int = '\ndef foo():\n import os\n return False\n'
_a : List[str] = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
_a : Optional[Any] = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
_a : Any = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
_a : str = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
_a : int = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
_a : Dict = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
_a : List[str] = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
_a : List[Any] = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
_a : List[str] = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Optional[Any] ) -> List[str]:
_lowerCAmelCase : Tuple = os.path.join(_lowerCamelCase ,"""test_file.py""" )
with open(_lowerCamelCase ,"""w""" ) as _tmp_file:
_tmp_file.write(_lowerCamelCase )
_lowerCAmelCase : int = get_imports(_lowerCamelCase )
assert parsed_imports == ["os"]
| 44
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_a : Union[str, Any] = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_a : List[str] = 10
_a : List[Any] = 256
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[MinHash]:
if len(_lowerCamelCase ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase : Optional[Any] = MinHash(num_perm=_lowerCamelCase )
for token in set(_lowerCamelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(_lowerCamelCase ) if len(t.strip() ) > 0}
class __A :
def __init__( self , *,
a__ = 0.8_5 , ):
_lowerCAmelCase : List[Any] = duplication_jaccard_threshold
_lowerCAmelCase : Union[str, Any] = NUM_PERM
_lowerCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase : Optional[int] = defaultdict(a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self._index.query(a__ )
if code_key in self._index.keys:
print(F"Duplicate key {code_key}" )
return
self._index.insert(a__ , a__ )
if len(a__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(a__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(a__ )
def __A ( self ):
_lowerCAmelCase : int = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase : List[str] = [base] + list(a__ )
# reformat the cluster to be a list of dict
_lowerCAmelCase : List[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(a__ )
return duplicate_clusters
def __A ( self , a__ ):
_lowerCAmelCase : Dict = self.get_duplicate_clusters()
with open(a__ , """w""" ) as f:
json.dump(a__ , a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ) -> Optional[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash ,ThreadedIterator(_lowerCamelCase ,max_queue_size=10000 ) ,chunksize=100 ,):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float ) -> List[str]:
_lowerCAmelCase : Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=_lowerCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowerCamelCase ) ) ,max_queue_size=100 ) ):
di.add(_lowerCamelCase ,_lowerCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
_lowerCAmelCase : Any = get_tokens(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_tokens(_lowerCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_a : str = None
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : int = []
for elementa in cluster:
_lowerCAmelCase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
_lowerCAmelCase : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_lowerCamelCase ,_lowerCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase : Any = 1
extremes.append(_lowerCamelCase )
return extremes
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> str:
global _shared_dataset
_lowerCAmelCase : Tuple = dataset
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = partial(_find_cluster_extremes_shared ,jaccard_threshold=_lowerCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowerCamelCase ,_lowerCamelCase ,) ,total=len(_lowerCamelCase ) ,):
extremes_list.append(_lowerCamelCase )
return extremes_list
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase : Tuple = make_duplicate_clusters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Tuple = find_extremes(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : List[Any] = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase : List[Any] = dataset.filter(lambda _lowerCamelCase ,_lowerCamelCase : idx not in remove_indices ,with_indices=_lowerCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase : Tuple = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase : Dict = extreme_dict[element["""base_index"""]]["""copies"""]
print(f"Original dataset size: {len(_lowerCamelCase )}" )
print(f"Number of duplicate clusters: {len(_lowerCamelCase )}" )
print(f"Files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Unique files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Filtered dataset size: {len(_lowerCamelCase )}" )
return ds_filter, duplicate_clusters
| 44
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
def __init__( self , a__ , a__=13 , a__=32 , a__=3 , a__=4 , a__=[10, 20, 30, 40] , a__=[2, 2, 3, 2] , a__=True , a__=True , a__=37 , a__="gelu" , a__=10 , a__=0.0_2 , a__=["stage2", "stage3", "stage4"] , a__=[2, 3, 4] , a__=None , ):
_lowerCAmelCase : int = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Any = num_stages
_lowerCAmelCase : Tuple = hidden_sizes
_lowerCAmelCase : Dict = depths
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = out_features
_lowerCAmelCase : int = out_indices
_lowerCAmelCase : List[Any] = scope
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Dict = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Tuple = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : str = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Optional[int] = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : int = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_UpperCamelCase : List[Any] = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : List[str] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : str = False
_UpperCamelCase : Dict = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = ConvNextModelTester(self )
_lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def __A ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ):
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def __A ( self ):
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def __A ( self ):
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(a__ )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : int = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def __A ( self ):
def check_hidden_states_output(a__ , a__ , a__ ):
_lowerCAmelCase : Dict = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : List[str] = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : str = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Optional[Any] = True
check_hidden_states_output(a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def __A ( self ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
_lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : Optional[Any] = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(a__ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : Union[str, Any] = image_processor(images=a__ , return_tensors="""pt""" ).to(a__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(**a__ )
# verify the logits
_lowerCAmelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
@require_torch
class __A ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = (ConvNextBackbone,) if is_torch_available() else ()
_UpperCamelCase : int = ConvNextConfig
_UpperCamelCase : Dict = False
def __A ( self ):
_lowerCAmelCase : str = ConvNextModelTester(self )
| 44
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = "swinv2"
_UpperCamelCase : List[str] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a__=224 , a__=4 , a__=3 , a__=96 , a__=[2, 2, 6, 2] , a__=[3, 6, 12, 24] , a__=7 , a__=4.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=0.0_2 , a__=1e-5 , a__=32 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[Any] = embed_dim
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : List[Any] = len(a__ )
_lowerCAmelCase : Any = num_heads
_lowerCAmelCase : Tuple = window_size
_lowerCAmelCase : Tuple = mlp_ratio
_lowerCAmelCase : Any = qkv_bias
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : str = drop_path_rate
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[str] = use_absolute_embeddings
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Any = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(a__ ) - 1) )
_lowerCAmelCase : Tuple = (0, 0, 0, 0)
| 44
| 1
|
"""simple docstring"""
from collections import defaultdict
class __A :
def __init__( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
_lowerCAmelCase : Any = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(a__ ) )
]
_lowerCAmelCase : Dict = defaultdict(a__ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
_lowerCAmelCase : Union[str, Any] = (1 << len(a__ )) - 1
def __A ( self , a__ , a__ ):
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
_lowerCAmelCase : Any = self.count_ways_until(a__ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
_lowerCAmelCase : List[str] = total_ways_util
return self.dp[mask][task_no]
def __A ( self , a__ ):
# Store the list of persons for each task
for i in range(len(a__ ) ):
for j in task_performed[i]:
self.task[j].append(a__ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
_a : Any = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_a : Any = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 44
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = """en_speaker_1"""
_lowerCAmelCase : List[Any] = """This is a test string"""
_lowerCAmelCase : Any = """speaker_embeddings_path.json"""
_lowerCAmelCase : List[Any] = """speaker_embeddings"""
def __A ( self , **a__ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
_lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : Dict = {
"""semantic_prompt""": np.ones(a__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(a__ , **a__ )
_lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
_lowerCAmelCase : Dict = processor(text=self.input_string )
_lowerCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44
| 1
|
"""simple docstring"""
_a : List[str] = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
_a : Optional[Any] = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
_lowerCAmelCase : Tuple = from_type.lower().strip("""s""" )
_lowerCAmelCase : str = to_type.lower().strip("""s""" )
_lowerCAmelCase : int = UNIT_SYMBOL.get(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Any = UNIT_SYMBOL.get(_lowerCamelCase ,_lowerCamelCase )
if from_sanitized not in METRIC_CONVERSION:
_lowerCAmelCase : Optional[int] = (
f"Invalid 'from_type' value: {from_type!r}.\n"
f"Conversion abbreviations are: {', '.join(_lowerCamelCase )}"
)
raise ValueError(_lowerCamelCase )
if to_sanitized not in METRIC_CONVERSION:
_lowerCAmelCase : int = (
f"Invalid 'to_type' value: {to_type!r}.\n"
f"Conversion abbreviations are: {', '.join(_lowerCamelCase )}"
)
raise ValueError(_lowerCamelCase )
_lowerCAmelCase : Tuple = METRIC_CONVERSION[from_sanitized]
_lowerCAmelCase : Optional[int] = METRIC_CONVERSION[to_sanitized]
_lowerCAmelCase : List[str] = 1
if from_exponent > to_exponent:
_lowerCAmelCase : str = from_exponent - to_exponent
else:
_lowerCAmelCase : Dict = -(to_exponent - from_exponent)
return value * pow(10 ,_lowerCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 44
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[Any] = torch.exp(_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.sum(_lowerCamelCase ,dim=1 ) # sum of exp(x_i)
_lowerCAmelCase : Dict = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44
| 1
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = "bart"
_UpperCamelCase : Tuple = ["past_key_values"]
_UpperCamelCase : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , a__=50265 , a__=1024 , a__=12 , a__=4096 , a__=16 , a__=12 , a__=4096 , a__=16 , a__=0.0 , a__=0.0 , a__="gelu" , a__=1024 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=0.0 , a__=False , a__=True , a__=3 , a__=1 , a__=0 , a__=2 , a__=True , a__=2 , a__=2 , **a__ , ):
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Union[str, Any] = max_position_embeddings
_lowerCAmelCase : Dict = d_model
_lowerCAmelCase : List[Any] = encoder_ffn_dim
_lowerCAmelCase : Tuple = encoder_layers
_lowerCAmelCase : Any = encoder_attention_heads
_lowerCAmelCase : Dict = decoder_ffn_dim
_lowerCAmelCase : Any = decoder_layers
_lowerCAmelCase : Dict = decoder_attention_heads
_lowerCAmelCase : Union[str, Any] = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : Union[str, Any] = activation_dropout
_lowerCAmelCase : Optional[int] = activation_function
_lowerCAmelCase : Optional[Any] = init_std
_lowerCAmelCase : int = encoder_layerdrop
_lowerCAmelCase : List[Any] = decoder_layerdrop
_lowerCAmelCase : int = classifier_dropout
_lowerCAmelCase : Optional[int] = use_cache
_lowerCAmelCase : str = encoder_layers
_lowerCAmelCase : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , forced_eos_token_id=a__ , **a__ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , a__ ):
_lowerCAmelCase : Union[str, Any] = self.bos_token_id
warnings.warn(
F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"""The config can simply be saved and uploaded again to be fixed.""" )
class __A ( SCREAMING_SNAKE_CASE_ ):
@property
def __A ( self ):
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_lowerCAmelCase : Optional[Any] = {0: """batch"""}
_lowerCAmelCase : Optional[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_lowerCAmelCase : str = {0: """batch""", 1: """decoder_sequence"""}
_lowerCAmelCase : Dict = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(a__ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.num_layers
for i in range(a__ ):
_lowerCAmelCase : int = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCAmelCase : int = {0: """batch""", 2: """past_sequence + sequence"""}
else:
_lowerCAmelCase : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def __A ( self ):
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : List[Any] = super().outputs
else:
_lowerCAmelCase : List[str] = super(a__ , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase : Any = self.num_layers
for i in range(a__ ):
_lowerCAmelCase : str = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCAmelCase : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ):
_lowerCAmelCase : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a__ , a__ , a__ , a__ , a__ )
# Generate decoder inputs
_lowerCAmelCase : List[Any] = seq_length if not self.use_past else 1
_lowerCAmelCase : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a__ , a__ , a__ , a__ , a__ )
_lowerCAmelCase : List[str] = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase : Optional[int] = dict(**a__ , **a__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : List[str] = common_inputs["""input_ids"""].shape
_lowerCAmelCase : Tuple = common_inputs["""decoder_input_ids"""].shape[1]
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.num_attention_heads
_lowerCAmelCase : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : List[Any] = decoder_seq_length + 3
_lowerCAmelCase : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase : Union[str, Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(a__ , a__ )] , dim=1 )
_lowerCAmelCase : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.num_layers
_lowerCAmelCase : Dict = min(a__ , a__ )
_lowerCAmelCase : List[Any] = max(a__ , a__ ) - min_num_layers
_lowerCAmelCase : Union[str, Any] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(a__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(a__ ),
torch.zeros(a__ ),
torch.zeros(a__ ),
torch.zeros(a__ ),
) )
# TODO: test this.
_lowerCAmelCase : List[str] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(a__ , a__ ):
common_inputs["past_key_values"].append((torch.zeros(a__ ), torch.zeros(a__ )) )
return common_inputs
def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ):
_lowerCAmelCase : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a__ , a__ , a__ , a__ , a__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Optional[int] = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase : Dict = self.num_layers
_lowerCAmelCase , _lowerCAmelCase : int = self.num_attention_heads
_lowerCAmelCase : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : Optional[Any] = common_inputs["""attention_mask"""].dtype
_lowerCAmelCase : Any = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(a__ , a__ , dtype=a__ )] , dim=1 )
_lowerCAmelCase : List[Any] = [
(torch.zeros(a__ ), torch.zeros(a__ )) for _ in range(a__ )
]
return common_inputs
def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase : Tuple = compute_effective_axis_dimension(
a__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(a__ )
_lowerCAmelCase : Any = compute_effective_axis_dimension(
a__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a__ )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase : int = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase : str = dict(tokenizer(a__ , return_tensors=a__ ) )
return common_inputs
def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ):
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
elif self.task == "causal-lm":
_lowerCAmelCase : str = self._generate_dummy_inputs_for_causal_lm(
a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
else:
_lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
return common_inputs
def __A ( self , a__ , a__ , a__ , a__ ):
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : List[str] = super()._flatten_past_key_values_(a__ , a__ , a__ , a__ )
else:
_lowerCAmelCase : Tuple = super(a__ , self )._flatten_past_key_values_(
a__ , a__ , a__ , a__ )
| 44
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : str = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , a__ = None , a__ = None , **a__ , ):
super().__init__(self , **a__ )
_lowerCAmelCase : Any = repo_info
_lowerCAmelCase : Optional[Any] = token
_lowerCAmelCase : Optional[int] = None
def __A ( self ):
if self.dir_cache is None:
_lowerCAmelCase : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCAmelCase : Any = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(a__ ): {"""name""": str(a__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self , a__ , a__ = "rb" , **a__ , ):
if not isinstance(self.repo_info , a__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_lowerCAmelCase : Tuple = hf_hub_url(self.repo_info.id , a__ , revision=self.repo_info.sha )
return fsspec.open(
a__ , mode=a__ , headers=get_authentication_headers_for_url(a__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __A ( self , a__ , **a__ ):
self._get_dirs()
_lowerCAmelCase : Union[str, Any] = self._strip_protocol(a__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(a__ )
def __A ( self , a__ , a__=False , **a__ ):
self._get_dirs()
_lowerCAmelCase : Any = PurePosixPath(path.strip("""/""" ) )
_lowerCAmelCase : List[str] = {}
for p, f in self.dir_cache.items():
_lowerCAmelCase : Any = PurePosixPath(p.strip("""/""" ) )
_lowerCAmelCase : Optional[int] = p.parent
if root == path:
_lowerCAmelCase : Dict = f
_lowerCAmelCase : Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 44
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Any = TextToVideoSDPipeline
_UpperCamelCase : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_UpperCamelCase : List[str] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
_lowerCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
_lowerCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
_lowerCAmelCase : Any = CLIPTextModel(a__ )
_lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Any = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase : str = TextToVideoSDPipeline(**a__ )
_lowerCAmelCase : Union[str, Any] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = self.get_dummy_inputs(a__ )
_lowerCAmelCase : int = """np"""
_lowerCAmelCase : List[Any] = sd_pipe(**a__ ).frames
_lowerCAmelCase : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_lowerCAmelCase : Union[str, Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=a__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
_lowerCAmelCase : Dict = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowerCAmelCase : Optional[int] = pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = """Spiderman is surfing"""
_lowerCAmelCase : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Dict = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""pt""" ).frames
_lowerCAmelCase : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __A ( self ):
_lowerCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
_lowerCAmelCase : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_lowerCAmelCase : Dict = pipe.to("""cuda""" )
_lowerCAmelCase : Any = """Spiderman is surfing"""
_lowerCAmelCase : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(a__ , generator=a__ , num_inference_steps=2 , output_type="""pt""" ).frames
_lowerCAmelCase : Tuple = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 44
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
_UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCAmelCase : int = MultilingualCLIP(a__ )
_lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
| 44
| 1
|
"""simple docstring"""
_a : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
_a : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
_lowerCAmelCase : Any = (
f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
f"Valid values are: {', '.join(_lowerCamelCase )}"
)
raise ValueError(_lowerCamelCase )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] ,3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
"""simple docstring"""
from math import ceil
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ) -> int:
_lowerCAmelCase : Dict = list(range(0 ,_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_lowerCAmelCase : Union[str, Any] = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCamelCase )
# Missing blocks
_lowerCAmelCase : int = [i for i in blocks if i not in device_map_blocks]
_lowerCAmelCase : List[Any] = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> str:
_lowerCAmelCase : Optional[Any] = list(range(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = int(ceil(n_layers / len(_lowerCamelCase ) ) )
_lowerCAmelCase : Optional[int] = [layers[i : i + n_blocks] for i in range(0 ,_lowerCamelCase ,_lowerCamelCase )]
return dict(zip(_lowerCamelCase ,_lowerCamelCase ) )
| 44
| 1
|
"""simple docstring"""
import numpy as np
import qiskit
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 8 ,_lowerCamelCase : int | None = None ) -> str:
_lowerCAmelCase : int = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_lowerCAmelCase : Tuple = 6 * key_len
# Measurement basis for Alice's qubits.
_lowerCAmelCase : Dict = rng.integers(2 ,size=_lowerCamelCase )
# The set of states Alice will prepare.
_lowerCAmelCase : Tuple = rng.integers(2 ,size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
_lowerCAmelCase : Union[str, Any] = rng.integers(2 ,size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
_lowerCAmelCase : Dict = qiskit.QuantumCircuit(_lowerCamelCase ,name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_lowerCAmelCase : int = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_lowerCAmelCase : List[str] = qiskit.execute(_lowerCamelCase ,_lowerCamelCase ,shots=1 ,seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
_lowerCAmelCase : List[Any] = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_lowerCAmelCase : str = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_lowerCAmelCase : List[Any] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase ,"""0""" )
return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 44
|
"""simple docstring"""
_a : List[str] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 44
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_a : Any = logging.get_logger(__name__)
@dataclass
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **a__ ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCAmelCase : Tuple = deprecated_arg[3:]
setattr(self , a__ , not kwargs.pop(a__ ) )
logger.warning(
F"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
F" {positive_arg}={kwargs[positive_arg]}" )
_lowerCAmelCase : List[Any] = kwargs.pop("""torchscript""" , self.torchscript )
_lowerCAmelCase : List[str] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
_lowerCAmelCase : List[str] = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
super().__init__(**a__ )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Trace the models using torchscript"} )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
_UpperCamelCase : str = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def __A ( self ):
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
_lowerCAmelCase : int = torch.device("""cpu""" )
_lowerCAmelCase : Union[str, Any] = 0
elif is_torch_tpu_available():
_lowerCAmelCase : str = xm.xla_device()
_lowerCAmelCase : Optional[Any] = 0
else:
_lowerCAmelCase : Union[str, Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
_lowerCAmelCase : Optional[Any] = torch.cuda.device_count()
return device, n_gpu
@property
def __A ( self ):
return is_torch_tpu_available() and self.tpu
@property
def __A ( self ):
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __A ( self ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def __A ( self ):
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def __A ( self ):
return self.n_gpu > 0
| 44
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
| 44
| 1
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = XGLMTokenizer
_UpperCamelCase : List[Any] = XGLMTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
_lowerCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=a__ )
_lowerCAmelCase : List[str] = pickle.dumps(a__ )
pickle.loads(a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = """Hello World!"""
_lowerCAmelCase : Optional[int] = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/xglm-564M""" , padding=a__ , )
| 44
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
| 44
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = TransfoXLTokenizer
_UpperCamelCase : List[str] = False
_UpperCamelCase : int = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : int = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __A ( self , **a__ ):
_lowerCAmelCase : Dict = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Dict = """<unk> UNwanted , running"""
_lowerCAmelCase : List[str] = """<unk> unwanted, running"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : List[str] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(a__ , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [0, 4, 8, 7] )
def __A ( self ):
_lowerCAmelCase : Dict = TransfoXLTokenizer(lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def __A ( self ):
_lowerCAmelCase : Any = TransfoXLTokenizer(lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __A ( self ):
_lowerCAmelCase : Optional[int] = TransfoXLTokenizer(lower_case=a__ )
_lowerCAmelCase : Dict = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
_lowerCAmelCase : Tuple = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(a__ ) , a__ )
self.assertEqual(tokenizer.convert_tokens_to_string(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = len(a__ )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(a__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 44
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" ,type=_lowerCamelCase ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
parser.add_argument(
"""--num_beams""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) ,)
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--config_name""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Pretrained config name or path if not the same as model_name""" ,)
parser.add_argument(
"""--device""" ,type=_lowerCamelCase ,default="""cpu""" ,help="""Device where the model will be run""" ,)
parser.add_argument("""--output_file_path""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Where to store the final ONNX file.""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
| 44
| 1
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[Any] ,_lowerCamelCase : int ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ) -> int:
# Load configuration defined in the metadata file
with open(_lowerCamelCase ) as metadata_file:
_lowerCAmelCase : Tuple = json.load(_lowerCamelCase )
_lowerCAmelCase : Dict = LukeConfig(use_entity_aware_attention=_lowerCamelCase ,**metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_lowerCAmelCase : List[Any] = torch.load(_lowerCamelCase ,map_location="""cpu""" )
# Load the entity vocab file
_lowerCAmelCase : Optional[int] = load_entity_vocab(_lowerCamelCase )
_lowerCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCAmelCase : Optional[Any] = AddedToken("""<ent>""" ,lstrip=_lowerCamelCase ,rstrip=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = AddedToken("""<ent2>""" ,lstrip=_lowerCamelCase ,rstrip=_lowerCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) ,"""w""" ) as f:
json.dump(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : List[Any] = LukeTokenizer.from_pretrained(_lowerCamelCase )
# Initialize the embeddings of the special tokens
_lowerCAmelCase : Optional[Any] = state_dict["""embeddings.word_embeddings.weight"""]
_lowerCAmelCase : Optional[Any] = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
_lowerCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
_lowerCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCAmelCase : Any = f"encoder.layer.{layer_index}.attention.self."
_lowerCAmelCase : str = state_dict[prefix + matrix_name]
_lowerCAmelCase : Dict = state_dict[prefix + matrix_name]
_lowerCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCAmelCase : Union[str, Any] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
_lowerCAmelCase : Dict = entity_emb[entity_vocab["""[MASK]"""]]
_lowerCAmelCase : int = LukeModel(config=_lowerCamelCase ).eval()
_lowerCAmelCase , _lowerCAmelCase : str = model.load_state_dict(_lowerCamelCase ,strict=_lowerCamelCase )
if not (len(_lowerCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"Missing keys {', '.join(_lowerCamelCase )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
# Check outputs
_lowerCAmelCase : List[Any] = LukeTokenizer.from_pretrained(_lowerCamelCase ,task="""entity_classification""" )
_lowerCAmelCase : int = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
_lowerCAmelCase : Dict = (39, 42)
_lowerCAmelCase : Optional[Any] = tokenizer(_lowerCamelCase ,entity_spans=[span] ,add_prefix_space=_lowerCamelCase ,return_tensors="""pt""" )
_lowerCAmelCase : List[str] = model(**_lowerCamelCase )
# Verify word hidden states
if model_size == "large":
_lowerCAmelCase : List[str] = torch.Size((1, 42, 1024) )
_lowerCAmelCase : List[str] = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
_lowerCAmelCase : int = torch.Size((1, 42, 768) )
_lowerCAmelCase : Tuple = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,_lowerCamelCase ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCAmelCase : Optional[int] = torch.Size((1, 1, 1024) )
_lowerCAmelCase : Tuple = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
_lowerCAmelCase : Optional[int] = torch.Size((1, 1, 768) )
_lowerCAmelCase : Dict = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,_lowerCamelCase ,atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(_lowerCamelCase ) )
model.save_pretrained(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Dict:
_lowerCAmelCase : List[str] = {}
with open(_lowerCamelCase ,"""r""" ,encoding="""utf-8""" ) as f:
for index, line in enumerate(_lowerCamelCase ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = line.rstrip().split("""\t""" )
_lowerCAmelCase : Any = index
return entity_vocab
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_a : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 44
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]: # noqa: E741
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : str = [False] * n
_lowerCAmelCase : str = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Union[str, Any] = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : int = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Tuple = True
else:
_lowerCAmelCase : Union[str, Any] = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : List[str] = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
# Adjacency list of graph
_a : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 44
| 1
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self , a__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = DebertaVaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_lowerCAmelCase : Dict = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 44
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = XGLMTokenizer
_UpperCamelCase : List[Any] = XGLMTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
_lowerCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=a__ )
_lowerCAmelCase : List[str] = pickle.dumps(a__ )
pickle.loads(a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = """Hello World!"""
_lowerCAmelCase : Optional[int] = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/xglm-564M""" , padding=a__ , )
| 44
| 1
|
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = WavaVecaPhonemeCTCTokenizer
_UpperCamelCase : Optional[int] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Union[str, Any] = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
_lowerCAmelCase : str = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : Union[str, Any] = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
def __A ( self , a__ , a__=False , a__=20 , a__=5 ):
_lowerCAmelCase : Dict = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=a__ )) for i in range(len(a__ ) )]
_lowerCAmelCase : Union[str, Any] = list(filter(lambda a__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=a__ ) , a__ ) )
if max_length is not None and len(a__ ) > max_length:
_lowerCAmelCase : Union[str, Any] = toks[:max_length]
if min_length is not None and len(a__ ) < min_length and len(a__ ) > 0:
while len(a__ ) < min_length:
_lowerCAmelCase : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCAmelCase : str = [t[0] for t in toks]
# Ensure consistency
_lowerCAmelCase : Optional[int] = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
if " " not in output_txt and len(a__ ) > 1:
_lowerCAmelCase : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a__ )
)
if with_prefix_space:
_lowerCAmelCase : str = """ """ + output_txt
_lowerCAmelCase : Any = tokenizer.encode(a__ , add_special_tokens=a__ )
return output_txt, output_ids
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
_lowerCAmelCase : Optional[Any] = tokenizer("""m xxx ɪ""" , do_phonemize=a__ ).input_ids
self.assertEqual(a__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
_lowerCAmelCase : Optional[int] = tokenizer("""m aaa ɪ ccc""" , do_phonemize=a__ ).input_ids
self.assertEqual(a__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
_lowerCAmelCase : Optional[Any] = tokenizer("""maɪ c""" , do_phonemize=a__ ).input_ids
self.assertEqual(a__ , [3, 200] ) # mai should be <unk> (=3)
def __A ( self ):
_lowerCAmelCase : Any = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Optional[int] = """Hello how are you"""
_lowerCAmelCase : Optional[int] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(a__ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def __A ( self ):
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : str = """Hello how are you"""
_lowerCAmelCase : List[str] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(a__ ).input_ids , tokenizer(a__ , do_phonemize=a__ ).input_ids )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Any = """Hello how are you"""
_lowerCAmelCase : Union[str, Any] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(tokenizer(a__ ).input_ids )
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_lowerCAmelCase : Tuple = tokenizer.decode(sample_ids[0] )
_lowerCAmelCase : List[str] = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , batch_tokens[0] )
self.assertEqual(a__ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def __A ( self ):
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : str = """Hello how are you"""
_lowerCAmelCase : Tuple = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(a__ , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def __A ( self ):
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : List[str] = """Hello how are you"""
_lowerCAmelCase : List[Any] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(a__ ).input_ids , tokenizer(a__ , do_phonemize=a__ ).input_ids )
def __A ( self ):
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
_lowerCAmelCase : str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_lowerCAmelCase : Optional[Any] = tokenizer.decode(sample_ids[0] )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , batch_tokens[0] )
self.assertEqual(a__ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
_lowerCAmelCase : Dict = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=a__ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(a__ , filter_word_delimiter_token=a__ )
self.assertEqual(a__ , batch_tokens[0] )
self.assertEqual(a__ , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def __A ( self ):
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : Tuple = """Hello how are you"""
_lowerCAmelCase : int = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
_lowerCAmelCase : Dict = tokenizer.decode(tokenizer(a__ ).input_ids , filter_word_delimiter_token=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : str = """Hello how are you"""
_lowerCAmelCase : Optional[int] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
_lowerCAmelCase : int = tokenizer.decode(tokenizer(a__ ).input_ids , filter_word_delimiter_token=a__ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , a__ )
def __A ( self ):
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=a__ )
_lowerCAmelCase : Any = """Hello how are you"""
_lowerCAmelCase : str = tokenizer(a__ , phonemizer_lang="""en-us""" ).input_ids
_lowerCAmelCase : Dict = tokenizer(a__ , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(a__ , a__ )
_lowerCAmelCase : Dict = tokenizer.decode(a__ )
_lowerCAmelCase : List[str] = tokenizer.decode(a__ )
self.assertEqual(a__ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(a__ , """ɛ l o h aʊ a ʁ j u""" )
def __A ( self ):
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Optional[Any] = """Hello how Are you"""
_lowerCAmelCase : str = """hello how are you"""
_lowerCAmelCase : int = tokenizer(a__ ).input_ids
_lowerCAmelCase : Optional[int] = tokenizer(a__ ).input_ids
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
_lowerCAmelCase : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def __A ( a__ , a__ ):
_lowerCAmelCase : List[str] = [d[key] for d in offsets]
return retrieved_list
def __A ( self ):
_lowerCAmelCase : Tuple = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_lowerCAmelCase : str = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_lowerCAmelCase : Optional[int] = tokenizer.decode(a__ , output_char_offsets=a__ , filter_word_delimiter_token=a__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(a__ , a__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(a__ , a__ ):
self.assertTrue(isinstance(a__ , a__ ) )
self.assertTrue(isinstance(outputs_list[0] , a__ ) )
# transform list to ModelOutput
_lowerCAmelCase : Any = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(a__ , a__ ):
if isinstance(a__ , a__ ):
[recursive_check(a__ , a__ ) for la, la in zip(a__ , a__ )]
self.assertEqual(a__ , a__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
_lowerCAmelCase : Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(a__ , output_char_offsets=a__ )
_lowerCAmelCase : str = [tokenizer.decode(a__ , output_char_offsets=a__ ) for ids in sample_ids]
check_list_tuples_equal(a__ , a__ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def __A ( self ):
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def __A ( self ):
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def __A ( self ):
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : int = tokenizer.vocab_size
_lowerCAmelCase : List[str] = len(a__ )
self.assertNotEqual(a__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCAmelCase : Optional[int] = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_lowerCAmelCase : str = tokenizer.add_tokens(a__ )
_lowerCAmelCase : Any = tokenizer.vocab_size
_lowerCAmelCase : str = len(a__ )
self.assertNotEqual(a__ , 0 )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , len(a__ ) )
self.assertEqual(a__ , all_size + len(a__ ) )
_lowerCAmelCase : List[Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=a__ )
self.assertGreaterEqual(len(a__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_lowerCAmelCase : int = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_lowerCAmelCase : List[Any] = tokenizer.add_special_tokens(a__ )
_lowerCAmelCase : Tuple = tokenizer.vocab_size
_lowerCAmelCase : List[Any] = len(a__ )
self.assertNotEqual(a__ , 0 )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , len(a__ ) )
self.assertEqual(a__ , all_size_a + len(a__ ) )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=a__ )
self.assertGreaterEqual(len(a__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def __A ( self ):
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def __A ( self ):
pass
def __A ( self ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
_lowerCAmelCase : Optional[int] = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : str = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(output["""text"""] , a__ )
| 44
|
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ) -> List[str]:
_lowerCAmelCase : Tuple = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[int] = height - k_size + 1
_lowerCAmelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(_lowerCamelCase ) ,range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : List[Any] = gen_gaussian_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = ravel(_lowerCamelCase )
# reshape and get the dst image
_lowerCAmelCase : int = dot(_lowerCamelCase ,_lowerCamelCase ).reshape(_lowerCamelCase ,_lowerCamelCase ).astype(_lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_a : Optional[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
_a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_a : Union[str, Any] = gaussian_filter(gray, 3, sigma=1)
_a : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
| 44
| 1
|
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_a : List[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any]=None ,_lowerCamelCase : Union[str, Any]=None ) -> Dict:
return field(default_factory=lambda: default ,metadata=_lowerCamelCase )
@dataclass
class __A :
_UpperCamelCase : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
_UpperCamelCase : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
_UpperCamelCase : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Use FP16 to accelerate inference."} )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Benchmark training of model"} )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Verbose memory tracing"} )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Trace memory line by line"} )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Save result to a CSV file"} )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Save all print statements in a log file"} )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to print environment information"} )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
_UpperCamelCase : str = field(
default=f"""inference_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv."} , )
_UpperCamelCase : str = field(
default=f"""inference_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
_UpperCamelCase : str = field(
default=f"""train_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
_UpperCamelCase : str = field(
default=f"""train_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
_UpperCamelCase : str = field(
default=f"""env_info_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving environment information."} , )
_UpperCamelCase : str = field(
default=f"""log_{round(time() )}.csv""" , metadata={"help": "Log filename used if print statements are saved in log."} , )
_UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def __A ( self ):
warnings.warn(
F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , a__ , )
def __A ( self ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __A ( self ):
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def __A ( self ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 44
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_a : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
_a : Optional[Any] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
_a : Any = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ElectraTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ):
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , )
_lowerCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , a__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , a__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , a__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(a__ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : int = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : Dict = tokenize_chinese_chars
_lowerCAmelCase : str = normalizer_class(**a__ )
_lowerCAmelCase : List[str] = do_lower_case
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
| 44
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.