| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find all print statements that are not commented out or inside a docstring."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
"""Convert LUKE checkpoint."""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError("last_hidden_state values do not match the expected slice")

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError("entity_last_hidden_state values do not match the expected slice")

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
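
# Example invocation, as a sketch (the file name and paths below are placeholders):
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path luke_base/pytorch_model.bin \
#       --metadata_path luke_base/metadata.json \
#       --entity_vocab_path luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base \
#       --model_size base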
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a ="""\
"""
a ="""
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
a ="""
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id='gpt2',
...                              add_start_token=False,
...                              input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
...                                     "wikitext-2-raw-v1",
...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s != '']
>>> results = perplexity.compute(model_id='gpt2',
...                              input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
a ={"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits into a background/foreground mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
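

# A self-contained sketch of the mask binarization performed in `decode` above;
# random values stand in for a real CLIPSeg logit map, and the helper name is
# hypothetical.
def _demo_binarize(logit_map: np.ndarray) -> "Image":
    array = logit_map.copy()
    array[array <= 0] = 0  # background
    array[array > 0] = 1  # foreground
    return Image.fromarray((array * 255).astype(np.uint8))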
"""Feature extractor class for Deformable DETR."""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
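

# Usage sketch (hypothetical): instantiating the deprecated class still works but
# emits a FutureWarning pointing at the replacement.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       extractor = DeformableDetrFeatureExtractor()
#       assert issubclass(caught[-1].category, FutureWarning)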
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
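
# Example invocation, as a sketch (model, languages and output directory are
# placeholders):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de \
#       --train_language en \
#       --do_train \
#       --do_eval \
#       --output_dir /tmp/debug_xnli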
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
"""A script creating a RAG checkpoint from a generator and a question encoder checkpoint."""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")

    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
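
# Example invocation, as a sketch (model identifiers are placeholders):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-checkpoint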
"""Convert ViT hybrid checkpoints from the timm library."""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
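
# Example invocation, as a sketch (the default timm name is reused; the dump
# folder is a placeholder):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384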
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """tapas"""
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
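
# Editor's example (hypothetical): constructing the config above for a weakly
# supervised aggregation setup such as WTQ. Assumes the class is exposed as
# `TapasConfig` in transformers; the hyperparameter values are illustrative.
def _example_build_tapas_config():
    from transformers import TapasConfig

    config = TapasConfig(
        num_aggregation_labels=4,        # e.g. NONE, SUM, AVERAGE, COUNT
        use_answer_as_supervision=True,  # derive cell supervision from answers
        answer_loss_cutoff=0.664,
        cell_selection_preference=0.207,
    )
    return config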
| 76
|
from typing import Dict
from .base import GenericTensor, Pipeline
class UpperCAmelCase_ ( Pipeline ):
'''simple docstring'''
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess( self , inputs , **tokenize_kwargs ) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs

    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs , return_tensors=False ):
        # model_outputs[0] is the first available tensor (the last hidden state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
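
# Editor's example (hypothetical): the class above backs the
# "feature-extraction" pipeline task; a minimal sketch of calling it through
# the pipeline factory. The model name is an illustrative assumption.
def _example_feature_extraction():
    from transformers import pipeline

    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    # Returns a nested list shaped [1, sequence_length, hidden_size].
    return extractor("This is a simple test.")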
| 76
| 1
|
"""simple docstring"""
from torch import nn
class lowerCAmelCase ( nn.Module ):
    '''Single-layer classification head mapping an embedding to class logits.'''

    def __init__( self , class_size , embed_size ):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )

    def forward( self , hidden_state ):
        logits = self.mlp(hidden_state )
        return logits
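
# Editor's example (hypothetical usage of the head defined above): project a
# batch of 768-dim embeddings onto 5 class logits.
def _example_classification_head():
    import torch

    head = lowerCAmelCase(class_size=5, embed_size=768)
    hidden_state = torch.randn(2, 768)  # batch of two embeddings
    logits = head(hidden_state)         # shape: (2, 5)
    return logits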
| 516
|
"""simple docstring"""
import numpy as np
def snake_case__ ( vector : np.ndarray , alpha : float ):
    """Exponential Linear Unit (ELU): x for x > 0, else alpha * (exp(x) - 1)."""
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
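
# Editor's worked example for the ELU above: positive inputs pass through
# unchanged, negative inputs are squashed toward -alpha.
def _example_elu():
    import numpy as np

    x = np.array([2.3, 0.6, -2.0, -3.8])
    # alpha * (exp(-2.0) - 1) ≈ -0.259, alpha * (exp(-3.8) - 1) ≈ -0.293
    return snake_case__(x, alpha=0.3)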
| 516
| 1
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params( module ):
    '''Disable gradient updates for every parameter of the given module.'''
    for param in module.parameters():
        param.requires_grad = False


def get_device( ):
    '''Pick the best available torch device, warning about known MPS issues.'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device


def show_image( image ):
    '''Display an image with both axes hidden.'''
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()


def get_timestamp( ):
    '''Return the current time formatted as HH:MM:SS.'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
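
# Editor's example (hypothetical): combining the helpers above.
def _example_utils():
    device = get_device()
    print(f"[{get_timestamp()}] running on {device}")
    return device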
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ['PoolFormerFeatureExtractor']
    _import_structure["image_processing_poolformer"] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
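
# Editor's note: with the _LazyModule pattern above, heavy submodules are only
# imported on first attribute access, keeping the top-level import cheap. A
# hedged sketch (assumes transformers is installed with vision extras):
def _example_lazy_access():
    from transformers.models.poolformer import PoolFormerConfig

    return PoolFormerConfig()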
| 339
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class a ( unittest.TestCase ):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_ddim( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = ldmad_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236] )
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
    def test_stable_diffusion_prompt_embeds( self ):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = ldmad_pipe(**inputs )
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop('prompt' )]
        text_inputs = ldmad_pipe.tokenizer(
            prompt , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=True , return_tensors='pt' , )
        text_inputs = text_inputs['input_ids'].to(torch_device )
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs )[0]
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs )
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten() ).max() < 1E-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten() ).max() < 1E-4
    def test_stable_diffusion_negative_prompt( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True )
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = 'french fries'
        output = ldmad_pipe(**inputs , negative_prompt=negative_prompt )
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135] )
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm3d_stable_diffusion( self ):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
        ldmad_pipe = ldmad_pipe.to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
        assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
        assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm3d( self ):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3
    def test_ldm3d_v2( self ):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
        assert np.abs(expected_depth_std - depth.std() ) < 1E-3
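
# Editor's example (hypothetical inference sketch for the pipeline exercised
# above; the "Intel/ldm3d" checkpoint name is taken from the tests).
def _example_run_ldm3d():
    pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
    pipe = pipe.to('cuda' if torch.cuda.is_available() else 'cpu' )
    output = pipe('a photo of an astronaut riding a horse' , num_inference_steps=3 )
    return output.rgb, output.depth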
| 416
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class a ( PretrainedConfig ):
    model_type = "luke"
    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
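
# Editor's example (hypothetical): instantiating a deliberately tiny LUKE
# model from the config above; all sizes are illustrative only.
def _example_build_luke():
    from transformers import LukeConfig, LukeModel

    config = LukeConfig(
        vocab_size=100, entity_vocab_size=10, hidden_size=32, entity_emb_size=16,
        num_hidden_layers=2, num_attention_heads=2, intermediate_size=37,
    )
    model = LukeModel(config)
    return model.config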
| 416
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config( swin_name ) -> SwinConfig:
    """Derive a SwinConfig from the timm model name."""
    config = SwinConfig()
    name_split = swin_name.split('_' )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key( name ) -> str:
    """Map a timm parameter name to its transformers equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'swin.' + name
    return name
def convert_state_dict( orig_state_dict , model ):
    """Convert a timm Swin state dict to the transformers naming scheme."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # split the fused qkv matrix into separate query/key/value tensors
            if "weight" in key:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[
                    :dim
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint( swin_name , pytorch_dump_folder_path ):
    """Convert a timm Swin checkpoint, verify the logits, and save it."""
    timm_model = timm.create_model(swin_name , pretrained=True )
    timm_model.eval()
    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    timm_outs = timm_model(inputs['pixel_values'] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1E-3 )
    print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
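
# Editor's example (hypothetical): reload a converted checkpoint and classify
# the standard COCO cats image; the "./swin_dump" path is illustrative only.
def _example_verify_converted_swin(folder="./swin_dump"):
    model = SwinForImageClassification.from_pretrained(folder)
    processor = AutoImageProcessor.from_pretrained(folder)
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    return model.config.id2label[int(logits.argmax(-1))]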
| 20
|
"""simple docstring"""
def exchange_sort( numbers: list[int] ) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    unsorted = [int(item) for item in user_input.split("," )]
    print(exchange_sort(unsorted ))
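
# Editor's note: exchange sort makes O(n^2) comparisons, like selection sort
# but swapping immediately against position i. Worked example:
def _example_exchange_sort():
    assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert exchange_sort([-1, 0, 7]) == [-1, 0, 7]
    assert exchange_sort([]) == []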
| 20
| 1
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
'''simple docstring'''
    def __init__( self , parent ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class snake_case_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
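
# Editor's example (hypothetical): the same forward pass as a standalone
# snippet, mirroring the slow integration test above. Requires TensorFlow.
def _example_tf_distilbert_forward():
    model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
    hidden_states = model(input_ids )[0]  # shape (1, 6, 768)
    return hidden_states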
| 39
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def alpha_sigma_to_t(alpha , sigma ):
    """Convert (alpha, sigma) noise levels to a timestep in [0, 1]."""
    return torch.atan2(sigma , alpha ) / math.pi * 2


def get_crash_schedule(t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
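
# Editor's worked example for the schedule helpers above: map a linear
# t in (0, 1] onto the "crash" noise schedule used for sampling.
def _example_crash_schedule():
    t = torch.linspace(1 , 0 , 5 )[:-1]
    return get_crash_schedule(t )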
class Object(object ):
    pass


class DiffusionUncond(nn.Module ):
    def __init__( self , global_args ):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download(model_name ):
    url = MODELS_MAP[model_name]['''url''']
    os.system(F'''wget {url} ./''' )
    return F'''./{model_name}.ckpt'''
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name ):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F'''Attn error with {name}''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 ):
snake_case_ = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
snake_case_ = 0
if string.startswith('''net.3.''' ):
depth += 1
snake_case_ = string[6:]
elif string.startswith('''net.''' ):
snake_case_ = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
snake_case_ = string[7:]
if string.startswith('''main.''' ):
snake_case_ = string[5:]
# mid block
if string[:2].isdigit():
snake_case_ = string[:2]
snake_case_ = string[2:]
else:
snake_case_ = string[0]
snake_case_ = string[1:]
if depth == max_depth:
snake_case_ = MID_NUM_TO_LAYER[layer_num]
snake_case_ = '''mid_block'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) < 7:
snake_case_ = DOWN_NUM_TO_LAYER[layer_num]
snake_case_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) > 7:
snake_case_ = UP_NUM_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
snake_case_ = DEPTH_0_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - 1}''' if int(SCREAMING_SNAKE_CASE__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
snake_case_ = string_left[1:]
if "resnets" in new_layer:
snake_case_ = convert_resconv_naming(SCREAMING_SNAKE_CASE__ )
elif "attentions" in new_layer:
snake_case_ = convert_attn_naming(SCREAMING_SNAKE_CASE__ )
snake_case_ = new_string_left
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
snake_case_ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers, don't have trainable weights
continue
snake_case_ = rename(SCREAMING_SNAKE_CASE__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = transform_conv_attns(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = v
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if len(SCREAMING_SNAKE_CASE__ ) == 1:
if len(v.shape ) == 3:
# weight
snake_case_ = v[:, :, 0]
else:
# bias
snake_case_ = v
else:
# qkv matrices
snake_case_ = v.shape[0]
snake_case_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def main(args ):
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
snake_case_ = download(SCREAMING_SNAKE_CASE__ )
snake_case_ = MODELS_MAP[model_name]['''sample_rate''']
snake_case_ = MODELS_MAP[model_name]['''sample_size''']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
snake_case_ = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ , sample_rate=SCREAMING_SNAKE_CASE__ )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(SCREAMING_SNAKE_CASE__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE__ )['''state_dict'''] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(SCREAMING_SNAKE_CASE__ )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(SCREAMING_SNAKE_CASE__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(SCREAMING_SNAKE_CASE__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE__ )[:-1]
snake_case_ = get_crash_schedule(SCREAMING_SNAKE_CASE__ )
snake_case_ = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).audios
snake_case_ = sampling.iplms_sample(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , SCREAMING_SNAKE_CASE__ )
print('''Diff max''' , SCREAMING_SNAKE_CASE__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
main(args)
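
# Editor's example (hypothetical): running a published Dance Diffusion
# checkpoint with the pipeline class used by the conversion above; the
# "harmonai/maestro-150k" repo id is an assumption.
def _example_dance_diffusion():
    pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
    audios = pipe(num_inference_steps=10 , generator=torch.manual_seed(0 ) ).audios
    return audios.shape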
| 39
| 1
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
    webbrowser.open(link)
| 615
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self , seed=0 ):
        generator = np.random.RandomState(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = self.get_dummy_inputs()
__lowerCAmelCase : str = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : str = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.get_dummy_inputs()
__lowerCAmelCase : Tuple = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : Any = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.get_dummy_inputs()
__lowerCAmelCase : Tuple = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : Tuple = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.get_dummy_inputs()
__lowerCAmelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : List[Any] = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.get_dummy_inputs()
__lowerCAmelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : Optional[Any] = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Any) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.get_dummy_inputs()
__lowerCAmelCase : List[str] = 3 * [inputs["prompt"]]
# forward
__lowerCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs()
__lowerCAmelCase : Union[str, Any] = 3 * [inputs.pop("prompt")]
__lowerCAmelCase : Union[str, Any] = pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np" , )
__lowerCAmelCase : Dict = text_inputs["input_ids"]
__lowerCAmelCase : str = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
__lowerCAmelCase : Union[str, Any] = prompt_embeds
# forward
__lowerCAmelCase : Tuple = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
__lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = self.get_dummy_inputs()
__lowerCAmelCase : Optional[int] = 3 * ["this is a negative prompt"]
__lowerCAmelCase : Union[str, Any] = negative_prompt
__lowerCAmelCase : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
__lowerCAmelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
__lowerCAmelCase : Any = self.get_dummy_inputs()
__lowerCAmelCase : List[Any] = 3 * [inputs.pop("prompt")]
__lowerCAmelCase : Dict = []
for p in [prompt, negative_prompt]:
__lowerCAmelCase : Optional[Any] = pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np" , )
__lowerCAmelCase : Any = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
__lowerCAmelCase , __lowerCAmelCase : List[str] = embeds
# forward
__lowerCAmelCase : int = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
'''simple docstring'''
    @property
    def gpu_provider( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=ddim_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt, num_inference_steps=5, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 615
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A__ ( SchedulerMixin , ConfigMixin ):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 2_000, beta_min: float = 0.1, beta_max: float = 20, sampling_eps: float = 1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean
def __len__( self : Any ):
return self.config.num_train_timesteps
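# A minimal, illustrative driver for the scheduler above (not part of the library
# file): it runs the reverse variance-preserving SDE with a stand-in score. For
# x ~ N(0, I) the true score is -x, which keeps this demo self-contained; in real
# use the stand-in would be replaced by a trained score model.
def _demo_reverse_vp_sde(scheduler, num_inference_steps: int = 1_000):
    scheduler.set_timesteps(num_inference_steps)
    x = torch.randn(1, 3, 32, 32)
    for t in scheduler.timesteps:
        score = -x  # stand-in for score_model(x, t)
        x, x_mean = scheduler.step_pred(score, x, t * torch.ones(x.shape[0]))
    return x_mean  # the final denoised mean returned by step_pred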
| 37
|
def UpperCamelCase_ ( length = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]

    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length] )


if __name__ == "__main__":
    print(f"""{UpperCamelCase_() = }""")
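# Cross-check sketch (illustrative, assuming the DP above solves Project Euler 116:
# rows tiled with red/green/blue tiles of lengths 2/3/4, at least one tile per
# arrangement). A tiny single-colour recursion reproduces the same counts for small
# rows, e.g. 12 ways for a row of length 5.
from functools import lru_cache


@lru_cache(maxsize=None)
def _ways_single_colour(n: int, k: int) -> int:
    # rows of length n built from unit squares and coloured tiles of fixed length k
    if n < 0:
        return 0
    if n == 0:
        return 1
    return _ways_single_colour(n - 1, k) + _ways_single_colour(n - k, k)


def _brute_solution(length: int) -> int:
    # the "- 1" removes the all-black row, since at least one tile must be used
    return sum(_ways_single_colour(length, k) - 1 for k in (2, 3, 4))


assert _brute_solution(5) == UpperCamelCase_(5) == 12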
| 37
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders); a hypothetical stub is sketched below EXCLUDE_EXAMPLES
lowercase_ : Optional[Any] = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
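

# Hypothetical sketch of the `mocked_dataloaders` stub referenced in the comment
# above; the shapes and random data are illustrative, not the real MRPC loaders:
def mocked_dataloaders(accelerator, batch_size: int = 16):
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    # two tiny loaders of random data, enough for a smoke test
    dataset = TensorDataset(torch.randn(8, 4), torch.randint(0, 2, (8,)))
    loader = DataLoader(dataset, batch_size=batch_size)
    return loader, loader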
class _lowerCamelCase ( unittest.TestCase ):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ) -> None:
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('''examples''', '''by_feature'''))
        examples_path = os.path.abspath('''examples''')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section='''main()''' if parser_only else '''training_function()''',
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = '''\n'''.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '''''')
                        self.assertEqual(diff, '''''')

    def test_nlp_examples(self):
        self.one_complete_example('''complete_nlp_example.py''', True)
        self.one_complete_example('''complete_nlp_example.py''', False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join('''examples''', '''cv_example.py'''))
        special_strings = [
            ''' ''' * 16 + '''{\n\n''',
            ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
            ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
            ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
            ''' ''' * 20 + '''"epoch": epoch,\n\n''',
            ''' ''' * 16 + '''},\n\n''',
            ''' ''' * 16 + '''step=epoch,\n''',
            ''' ''' * 12,
            ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
        ]
        self.one_complete_example('''complete_cv_example.py''', True, cv_path, special_strings)
        self.one_complete_example('''complete_cv_example.py''', False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, '''default_config.yml''')

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def tearDownClass(cls) -> None:
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f'\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps epoch\n            --output_dir {self.tmpdir}\n            '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, '''epoch_0''')))

    def test_checkpointing_by_steps(self):
        testargs = f'\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps 1\n            --output_dir {self.tmpdir}\n            '.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, '''step_2''')))

    def test_load_states_by_epoch(self):
        testargs = f'\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n            '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('''epoch 0:''', output)
        self.assertIn('''epoch 1:''', output)

    def test_load_states_by_steps(self):
        testargs = f'\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n            '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''', output)
            self.assertIn('''epoch 1:''', output)
        else:
            self.assertIn('''epoch 0:''', output)
            self.assertIn('''epoch 1:''', output)

    @slow
    def test_cross_validation(self):
        testargs = '''
        examples/by_feature/cross_validation.py
        --num_folds 2
        '''.split()
        with mock.patch.dict(os.environ, {'''TESTING_MOCKED_DATALOADERS''': '''0'''}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('''({.+})''', output)
            results = [r for r in results if '''accuracy''' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['''accuracy'''], 0.75)

    def test_multi_process_metrics(self):
        testargs = ['''examples/by_feature/multi_process_metrics.py''']
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {'''WANDB_MODE''': '''offline'''})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n            examples/by_feature/tracking.py\n            --with_tracking\n            --project_dir {tmpdir}\n            '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, '''tracking''')))

    def test_gradient_accumulation(self):
        testargs = ['''examples/by_feature/gradient_accumulation.py''']
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ['''examples/by_feature/local_sgd.py''']
        run_command(self._launch_args + testargs)
| 107
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowercase_ : List[str] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
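

# Shape sketch (illustrative only): with num_image_embeds = 3, POOLING_BREAKDOWN[3]
# is (3, 1), so a Bx2048x7x7 feature map pools to Bx2048x3x1 and then flattens and
# transposes to Bx3x2048, i.e. three image "tokens" of width 2048.
def _pooling_shape_demo():
    pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[3])
    feats = torch.randn(2, 2048, 7, 7)
    out = torch.flatten(pool(feats), start_dim=2).transpose(1, 2).contiguous()
    return out.shape  # torch.Size([2, 3, 2048])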
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms
def __len__( self ) -> Optional[Any]:
return len(self.data )
    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]['''label''']]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]['''img'''])).convert('''RGB''')
        image = self.transforms(image)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['''label'''])
        return label_freqs
def collate_fn(batch):
    lens = [len(row['''sentence''']) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row['''sentence''']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['''image'''] for row in batch])
    tgt_tensor = torch.stack([row['''label'''] for row in batch])
    img_start_token = torch.stack([row['''image_start_token'''] for row in batch])
    img_end_token = torch.stack([row['''image_end_token'''] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 107
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class lowerCamelCase__ :
"""simple docstring"""
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
def snake_case_ ( self : List[str] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : List[Any] ) -> Optional[int]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__lowerCAmelCase , **__lowerCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__lowerCAmelCase , **__lowerCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
_A = AcceleratorState().num_processes
for _ in range(__lowerCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__lowerCAmelCase , **__lowerCAmelCase )
else:
self.scheduler.step(*__lowerCAmelCase , **__lowerCAmelCase )
def snake_case_ ( self : int ) -> List[Any]:
return self.scheduler.get_last_lr()
def snake_case_ ( self : str ) -> Optional[int]:
return self.scheduler.state_dict()
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
self.scheduler.load_state_dict(__lowerCAmelCase )
def snake_case_ ( self : str ) -> int:
return self.scheduler.get_lr()
def snake_case_ ( self : Optional[Any] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[Any] ) -> Tuple:
return self.scheduler.print_lr(*__lowerCAmelCase , **__lowerCAmelCase )
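

# Minimal usage sketch (illustrative; in practice Accelerator.prepare builds this
# wrapper for you rather than constructing it by hand):
#
#   import torch
#   model = torch.nn.Linear(2, 2)
#   opt = torch.optim.SGD(model.parameters(), lr=0.1)
#   sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
#   wrapped = lowerCamelCase__(sched, opt)  # step_with_optimizer=True by default
#   wrapped.step()  # steps `sched` num_processes times unless split_batches=True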
| 2
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class lowerCamelCase ( PretrainedConfig ):
    model_type = '''autoformer'''
    keys_to_ignore_at_inference = [
        '''past_key_values''',
    ]
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    def __init__( self , prediction_length = None , context_length = None , distribution_output = "student_t" , loss = "nll" , input_size = 1 , lags_sequence = [1, 2, 3, 4, 5, 6, 7] , scaling = True , num_time_features = 0 , num_dynamic_real_features = 0 , num_static_categorical_features = 0 , num_static_real_features = 0 , cardinality = None , embedding_dimension = None , d_model = 64 , encoder_attention_heads = 2 , decoder_attention_heads = 2 , encoder_layers = 2 , decoder_layers = 2 , encoder_ffn_dim = 32 , decoder_ffn_dim = 32 , activation_function = "gelu" , dropout = 0.1 , encoder_layerdrop = 0.1 , decoder_layerdrop = 0.1 , attention_dropout = 0.1 , activation_dropout = 0.1 , num_parallel_samples = 100 , init_std = 0.02 , use_cache = True , is_encoder_decoder=True , label_length = 10 , moving_average = 25 , autocorrelation_factor = 3 , **kwargs , ):
# time series specific configuration
__UpperCAmelCase : Optional[int] = prediction_length
__UpperCAmelCase : Tuple = context_length if context_length is not None else prediction_length
__UpperCAmelCase : Tuple = distribution_output
__UpperCAmelCase : Tuple = loss
__UpperCAmelCase : Dict = input_size
__UpperCAmelCase : str = num_time_features
__UpperCAmelCase : str = lags_sequence
__UpperCAmelCase : int = scaling
__UpperCAmelCase : int = num_dynamic_real_features
__UpperCAmelCase : List[str] = num_static_real_features
__UpperCAmelCase : List[str] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase__) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''')
__UpperCAmelCase : Union[str, Any] = cardinality
else:
__UpperCAmelCase : List[Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase__) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''')
__UpperCAmelCase : Union[str, Any] = embedding_dimension
else:
__UpperCAmelCase : int = [min(5_0 , (cat + 1) // 2) for cat in self.cardinality]
__UpperCAmelCase : Tuple = num_parallel_samples
# Transformer architecture configuration
__UpperCAmelCase : Dict = input_size * len(self.lags_sequence) + self._number_of_features
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : str = encoder_ffn_dim
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : Union[str, Any] = dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : int = activation_dropout
__UpperCAmelCase : str = encoder_layerdrop
__UpperCAmelCase : Dict = decoder_layerdrop
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Optional[int] = init_std
__UpperCAmelCase : Optional[int] = use_cache
# Autoformer
__UpperCAmelCase : str = label_length
__UpperCAmelCase : int = moving_average
__UpperCAmelCase : Optional[int] = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
    @property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
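

# Worked example (defaults): cardinality falls back to [0], so embedding_dimension
# becomes [min(50, (0 + 1) // 2)] == [0] and _number_of_features = 0 + 0 + 0 + 0 +
# 1 * 2 = 2; feature_size is then input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.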
| 462
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """hidden_sizes"""))
        self.parent.assertTrue(hasattr(config, """num_attention_heads"""))
        self.parent.assertTrue(hasattr(config, """num_encoder_blocks"""))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': SegformerModel,
            '''image-classification''': SegformerForImageClassification,
            '''image-segmentation''': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(A )
UpperCAmelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Any = [*signature.parameters.keys()]
UpperCAmelCase__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Optional[int] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase__ : str = outputs.attentions
UpperCAmelCase__ : Any = sum(self.model_tester.depths )
self.assertEqual(len(A ) ,A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ : int = True
UpperCAmelCase__ : List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase__ : str = outputs.attentions
self.assertEqual(len(A ) ,A )
# verify the first attentions (first block, first layer)
UpperCAmelCase__ : Any = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase__ : str = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
# verify the last attentions (last block, last layer)
UpperCAmelCase__ : List[Any] = (self.model_tester.image_size // 32) ** 2
UpperCAmelCase__ : List[str] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,)
UpperCAmelCase__ : Dict = len(A )
# Check attention is always last and order is fine
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : str = True
UpperCAmelCase__ : List[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**self._prepare_for_class(A ,A ) )
self.assertEqual(out_len + 1 ,len(A ) )
UpperCAmelCase__ : Tuple = outputs.attentions
self.assertEqual(len(A ) ,A )
# verify the first attentions (first block, first layer)
UpperCAmelCase__ : Optional[Any] = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase__ : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
def check_hidden_states_output(A : Tuple ,A : Optional[Any] ,A : Tuple ):
UpperCAmelCase__ : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Dict = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase__ : List[Any] = outputs.hidden_states
UpperCAmelCase__ : List[str] = self.model_tester.num_encoder_blocks
self.assertEqual(len(A ) ,A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : str = True
check_hidden_states_output(A ,A ,A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : str = True
for model_class in self.all_model_classes:
if model_class in get_values(A ):
continue
UpperCAmelCase__ : List[str] = model_class(A )
model.to(A )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(A ,A ,return_labels=A )
UpperCAmelCase__ : int = model(**A ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@slow
def __lowercase ( self : Tuple ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Union[str, Any] = SegformerModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
# only resize + normalize
UpperCAmelCase__ : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=A ,align=A ,do_random_crop=A )
UpperCAmelCase__ : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
A )
UpperCAmelCase__ : List[str] = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=A ,return_tensors="""pt""" )
UpperCAmelCase__ : Union[str, Any] = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
UpperCAmelCase__ : str = model(A )
UpperCAmelCase__ : Tuple = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,A )
UpperCAmelCase__ : Optional[int] = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,A ,atol=1e-4 ) )
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
# only resize + normalize
UpperCAmelCase__ : str = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=A ,align=A ,do_random_crop=A )
UpperCAmelCase__ : str = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(A )
UpperCAmelCase__ : Tuple = prepare_img()
UpperCAmelCase__ : Optional[int] = image_processor(images=A ,return_tensors="""pt""" )
UpperCAmelCase__ : Tuple = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
UpperCAmelCase__ : int = model(A )
UpperCAmelCase__ : str = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,A )
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,A ,atol=1e-1 ) )
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
# only resize + normalize
UpperCAmelCase__ : Dict = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=A ,align=A ,do_random_crop=A )
UpperCAmelCase__ : List[str] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
A )
UpperCAmelCase__ : int = prepare_img()
UpperCAmelCase__ : Any = image_processor(images=A ,return_tensors="""pt""" )
UpperCAmelCase__ : int = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
UpperCAmelCase__ : int = model(A )
UpperCAmelCase__ : Any = outputs.logits.detach().cpu()
UpperCAmelCase__ : Dict = image_processor.post_process_semantic_segmentation(outputs=A ,target_sizes=[(500, 300)] )
UpperCAmelCase__ : int = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,A )
UpperCAmelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=A )
UpperCAmelCase__ : Union[str, Any] = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape ,A )
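# Note (illustrative): post_process_semantic_segmentation argmaxes the class logits
# and, when target_sizes is given, bilinearly resizes them first; hence the (500, 300)
# map above versus the raw (128, 128) logits resolution when target_sizes is omitted.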
| 720
|
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase = 1000 ):
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , __UpperCamelCase + 1 ) )


if __name__ == "__main__":
    print(lowerCAmelCase())
| 194
| 0
|
def lowercase_ (m : int ):
    memo : list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1

    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(lowercase_(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(lowercase_(n))
        except ValueError:
            print("Please pass a number.")
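# Worked example (illustrative): lowercase_(5) returns 6, the six ways to write 5 as a
# sum of at least two positive integers: 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.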
| 478
|
a_ :dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
a_ :dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def lowercase_ (speed : float , unit_from : str , unit_to : str ):
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
            F'''Valid values are: {", ".join(speed_chart_inverse )}'''
        )
        raise ValueError(msg )
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
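# Worked example (illustrative): lowercase_(100, "km/h", "m/s") multiplies
# 100 * 1.0 * 0.277777778 and rounds to 27.778; the reverse trip,
# lowercase_(27.778, "m/s", "km/h"), gives 100.001 because of the rounding.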
| 478
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 706
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
def lowercase ( self : Optional[Any] ) -> List[Any]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowercase ( self : Tuple ) -> Optional[int]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : Optional[int] ) -> str:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
__snake_case = pipe(**self.get_dummy_inputs() )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : str ) -> List[str]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : List[str] ) -> Any:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowercase ( self : str ) -> Optional[int]:
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
__snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 93
| 0
|
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
'''simple docstring'''
def __init__( self , **_lowercase ):
"""simple docstring"""
super().__init__(**_lowercase )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
    def __call__(self, audios, **kwargs):
        """simple docstring"""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """simple docstring"""
        if isinstance(audio, str):
            if audio.startswith("""http://""") or audio.startswith("""https://"""):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, """rb""") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("""We expect a numpy ndarray as input""")
        if len(audio.shape) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="""pt""")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        candidate_labels = model_inputs.pop("""candidate_labels""")
        text_inputs = model_inputs.pop("""text_inputs""")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        candidate_labels = model_outputs.pop("""candidate_labels""")
        logits = model_outputs["""logits"""][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("""`tf` framework not supported.""")

        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
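

# Minimal usage sketch (assumes a CLAP-style checkpoint; the exact score is
# illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification",
#                         model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav",
#              candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#   # -> [{"score": 0.99, "label": "Sound of a dog"}, ...]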
| 5
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class _UpperCamelCase ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'gptsan-japanese'
    keys_to_ignore_at_inference = [
        'past_key_values',
    ]
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , vocab_size=3_6000 , max_position_embeddings=1280 , d_model=1024 , d_ff=8192 , d_ext=4096 , d_spout=128 , num_switch_layers=10 , num_ext_layers=0 , num_heads=16 , num_experts=16 , expert_capacity=128 , dropout_rate=0.0 , layer_norm_epsilon=1e-5 , router_bias=False , router_jitter_noise=0.0 , router_dtype="float32" , router_ignore_padding_tokens=False , output_hidden_states=False , output_attentions=False , initializer_factor=0.002 , output_router_logits=False , use_cache=True , separator_token_id=3_5998 , pad_token_id=3_5995 , eos_token_id=3_5999 , **kwargs , ):
"""simple docstring"""
a__ = vocab_size
a__ = max_position_embeddings
a__ = d_model
a__ = d_ff
a__ = d_ext
a__ = d_spout
a__ = num_switch_layers
a__ = num_ext_layers
a__ = num_switch_layers + num_ext_layers
a__ = num_heads
a__ = num_experts
a__ = expert_capacity
a__ = dropout_rate
a__ = layer_norm_epsilon
a__ = router_bias
a__ = router_jitter_noise
a__ = router_dtype
a__ = router_ignore_padding_tokens
a__ = output_hidden_states
a__ = output_attentions
a__ = initializer_factor
a__ = output_router_logits
a__ = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
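

# Worked example (defaults, illustrative): num_layers = num_switch_layers +
# num_ext_layers = 10 + 0, so the default configuration describes a ten-layer
# Switch-Transformer-style stack with num_experts = 16 experts per MoE layer.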
| 394
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
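# A hedged usage note: with the `_LazyModule` wiring above, the heavy torch and
# vision imports only happen on first attribute access (assuming this file is
# transformers/models/mobilenet_v2/__init__.py):
#   from transformers.models.mobilenet_v2 import MobileNetV2Config
#   config = MobileNetV2Config()  # first access triggers the real submodule import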
| 539
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( class_name: str ):
    '''simple docstring'''
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f'''.{module_name}''' ,"transformers.models" )
            try:
                return getattr(module ,class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor ,"__name__" ,None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers" )
    if hasattr(main_module ,class_name ):
        return getattr(main_module ,class_name )
    return None
def get_feature_extractor_config( pretrained_model_name_or_path: Union[str, os.PathLike] ,cache_dir: Optional[Union[str, os.PathLike]] = None ,force_download: bool = False ,resume_download: bool = False ,proxies: Optional[Dict[str, str]] = None ,use_auth_token: Optional[Union[bool, str]] = None ,revision: Optional[str] = None ,local_files_only: bool = False ,**kwargs ,):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path ,FEATURE_EXTRACTOR_NAME ,cache_dir=cache_dir ,force_download=force_download ,resume_download=resume_download ,proxies=proxies ,use_auth_token=use_auth_token ,revision=revision ,local_files_only=local_files_only ,)
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file ,encoding="utf-8" ) as reader:
        return json.load(reader )
class AutoFeatureExtractor:
def __init__( self ) -> List[str]:
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        config = kwargs.pop("config" ,None )
        trust_remote_code = kwargs.pop("trust_remote_code" ,None )
        kwargs["_from_auto"] = True
        config_dict , _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path ,**kwargs )
        feature_extractor_class = config_dict.get("feature_extractor_type" ,None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map" ,{} ):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config ,PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path ,**kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config ,"feature_extractor_type" ,None )
            if hasattr(config ,"auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code ,pretrained_model_name_or_path ,has_local_code ,has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map ,pretrained_model_name_or_path ,**kwargs )
            _ = kwargs.pop("code_revision" ,None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict ,**kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict ,**kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict ,**kwargs )
        raise ValueError(
            f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register( config_class ,feature_extractor_class ):
        FEATURE_EXTRACTOR_MAPPING.register(config_class ,feature_extractor_class )
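# Usage sketch for the class above; "facebook/wav2vec2-base-960h" is an
# illustrative checkpoint, and the register() call shows hypothetical classes.
if __name__ == "__main__":
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
    print(type(feature_extractor ).__name__ )  # Wav2Vec2FeatureExtractor
    # Custom pairs can be made discoverable the same way:
    # AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)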
| 539
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def parse_args():
"""simple docstring"""
    parser = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=str , default="wikitext" , help="Name of the training dataset. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=int , default=1000 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
return args
def tokenize_function( tokenizer ):
    """simple docstring"""
    def fn(examples ):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples( tokenized_data ):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        serialized_example = example.SerializeToString()
        records.append(serialized_example )
    return records
def main(args ):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , F"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"split-{args.split}-records-count.txt" , "w" ) as f:
        print(F"Total {args.split} records: {total_records}" , file=f )
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 554
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    latents : torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , in_channels : int = 3 , out_channels : int = 3 , down_block_types : Tuple[str] = ("DownEncoderBlock2D",) , up_block_types : Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels : Tuple[int] = (64,) , layers_per_block : int = 1 , act_fn : str = "silu" , latent_channels : int = 3 , sample_size : int = 32 , num_vq_embeddings : int = 256 , norm_num_groups : int = 32 , vq_embed_dim : Optional[int] = None , scaling_factor : float = 0.18215 , norm_type : str = "group" , ):
        """simple docstring"""
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
    def encode( self , x : torch.FloatTensor , return_dict : bool = True )-> VQEncoderOutput:
        """simple docstring"""
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self , h : torch.FloatTensor , force_not_quantize : bool = False , return_dict : bool = True )-> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == "spatial" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample : torch.FloatTensor , return_dict : bool = True )-> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
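# Round-trip sketch for the VQModel above: encode a random batch to latents,
# then quantize-decode back to pixel space. Shapes follow the illustrative
# defaults (a single down/up block preserves spatial size), stated here as an
# assumption rather than a guarantee.
if __name__ == "__main__":
    model = VQModel()
    model.eval()
    sample = torch.randn(1 , 3 , 32 , 32 )
    with torch.no_grad():
        latents = model.encode(sample ).latents
        reconstruction = model.decode(latents ).sample
    assert reconstruction.shape == sample.shape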
| 554
| 1
|
from numpy import exp, pi, sqrt
def gaussian( x , mu = 0.0 , sigma = 1.0 ) -> float:
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
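    # Quick numerical sanity check of the density above: at x == mu the value
    # collapses to 1 / sqrt(2 * pi * sigma**2).
    assert abs(gaussian(0 ) - 1 / sqrt(2 * pi ) ) < 1e-12
    assert abs(gaussian(5 , mu=5 , sigma=2 ) - 1 / sqrt(8 * pi ) ) < 1e-12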
| 627
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__a = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__a = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__a = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->List[str]:
return float((preds == labels).mean() )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="binary" ) ->Union[str, Any]:
UpperCAmelCase = simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = float(fa_score(y_true=lowerCAmelCase_ , y_pred=lowerCAmelCase_ , average=lowerCAmelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->List[Any]:
UpperCAmelCase = {}
for id_pred, label in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
UpperCAmelCase = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCAmelCase = [(pred, label)]
UpperCAmelCase , UpperCAmelCase = [], []
for question, preds_labels in question_map.items():
UpperCAmelCase , UpperCAmelCase = zip(*lowerCAmelCase_ )
UpperCAmelCase = fa_score(y_true=lowerCAmelCase_ , y_pred=lowerCAmelCase_ , average="""macro""" )
fas.append(lowerCAmelCase_ )
UpperCAmelCase = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCAmelCase_ ) )
ems.append(lowerCAmelCase_ )
UpperCAmelCase = float(sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ ) )
UpperCAmelCase = sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )
UpperCAmelCase = float(fa_score(y_true=lowerCAmelCase_ , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def _lowercase ( self : int ) -> Any:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def _lowercase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _lowercase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__lowerCamelCase , __lowerCamelCase )}
elif self.config_name == "cb":
return acc_and_fa(__lowerCamelCase , __lowerCamelCase , fa_avg="""macro""" )
elif self.config_name == "record":
UpperCAmelCase = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
UpperCAmelCase = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(__lowerCamelCase , __lowerCamelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__lowerCamelCase , __lowerCamelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__lowerCamelCase , __lowerCamelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 627
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    '''simple docstring'''
    if "model" in orig_key:
        orig_key = orig_key.replace("""model.""" , """""" )
    if "norm1" in orig_key:
        orig_key = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
    if "norm2" in orig_key:
        orig_key = orig_key.replace("""norm2""" , """output.LayerNorm""" )
    if "norm" in orig_key:
        orig_key = orig_key.replace("""norm""" , """LayerNorm""" )
    if "transformer" in orig_key:
        layer_num = orig_key.split(""".""" )[0].split("""_""" )[-1]
        orig_key = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("""mha.attn""" , """attention.self""" )
    if "mha" in orig_key:
        orig_key = orig_key.replace("""mha""" , """attention""" )
    if "W_q" in orig_key:
        orig_key = orig_key.replace("""W_q""" , """self.query""" )
    if "W_k" in orig_key:
        orig_key = orig_key.replace("""W_k""" , """self.key""" )
    if "W_v" in orig_key:
        orig_key = orig_key.replace("""W_v""" , """self.value""" )
    if "ff1" in orig_key:
        orig_key = orig_key.replace("""ff1""" , """intermediate.dense""" )
    if "ff2" in orig_key:
        orig_key = orig_key.replace("""ff2""" , """output.dense""" )
    if "ff" in orig_key:
        orig_key = orig_key.replace("""ff""" , """output.dense""" )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
    if "mlm" in orig_key:
        orig_key = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
    if "cls" not in orig_key:
        orig_key = 'yoso.' + orig_key
    return orig_key
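# Hedged demo of the mapping above on a made-up checkpoint key (not taken from
# a real YOSO checkpoint):
assert (
    rename_key("model.transformer_3.mha.W_q.weight" )
    == "yoso.encoder.layer.3.attention.self.query.weight"
)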
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
    orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path , map_location="""cpu""" )['model_state_dict']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 196
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=[8, 16, 32, 64] , SCREAMING_SNAKE_CASE__=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE__=[2, 3, 4] , SCREAMING_SNAKE_CASE__=1 , ):
'''simple docstring'''
snake_case: Dict = parent
snake_case: List[Any] = batch_size
snake_case: Any = image_size
snake_case: Optional[Any] = num_channels
snake_case: List[Any] = embeddings_size
snake_case: Tuple = hidden_sizes
snake_case: str = depths
snake_case: str = is_training
snake_case: List[str] = use_labels
snake_case: Tuple = hidden_act
snake_case: List[str] = num_labels
snake_case: Optional[int] = scope
snake_case: Any = len(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = out_features
snake_case: Optional[Any] = out_indices
snake_case: str = num_groups
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case: str = None
if self.use_labels:
snake_case: Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
snake_case: Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ):
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = BitModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = self.num_labels
snake_case: int = BitForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: List[Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = BitBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: int = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case: str = None
snake_case: int = BitBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case: str = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _UpperCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = BitModelTester(self )
snake_case: str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Bit does not output attentions' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case , snake_case: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case: List[str] = [*signature.parameters.keys()]
snake_case: Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case , snake_case: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case: Optional[int] = model_class(config=SCREAMING_SNAKE_CASE__ )
for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def _UpperCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
snake_case: Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case: Optional[int] = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case , snake_case: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: Optional[int] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case: Tuple = layer_type
snake_case: str = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case: Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case: Union[str, Any] = BitModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self ):
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Tuple = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE__ )
snake_case: str = self.default_image_processor
snake_case: List[Any] = prepare_img()
snake_case: List[str] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
snake_case: Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class BitBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = BitModelTester(self )
| 329
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "camembert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
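# Sketch of what the property above yields for the default task; both inputs
# share batch/sequence dynamic axes.
if __name__ == "__main__":
    onnx_config = CamembertOnnxConfig(CamembertConfig() )
    print(onnx_config.inputs )
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])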
| 55
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}
class GPTNeoXTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
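# Usage sketch; "EleutherAI/gpt-neox-20b" is the reference checkpoint named in
# the pretrained map above, but any compatible tokenizer.json works.
if __name__ == "__main__":
    tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b" )
    ids = tokenizer("Hello world" ).input_ids
    print(ids , tokenizer.decode(ids ) )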
| 55
| 1
|
import requests
_NEWS_API = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def fetch_bbc_news( bbc_news_api_key : str ) -> None:
    """simple docstring"""
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
        print(f'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 15
|
"""simple docstring"""
def solution( numerator = 1 , digit = 1000 ):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
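# Sanity sketch for the function above: among 1/d for d < 10 the longest
# recurring cycle belongs to d = 7 (0.142857...), and the classic Project
# Euler 26 answer for d < 1000 is 983 (stated here as an assumption).
assert solution(digit=10 ) == 7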
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 528
| 0
|
class _snake_case :
    def __init__( self, val ):
        self.val = val
        self.left = None
        self.right = None
    def insert( self, val ):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root , res ):
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 708
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def get_config( model_name ):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key( name ):
    if "stem.conv" in name:
        name = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "head.fc" in name:
        name = name.replace("head.fc" , "classifier.1" )
    if name.startswith("norm" ):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37
| 0
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( _UpperCAmelCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        # BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        bart_tokenizer_path = os.path.join(self.tmpdirname , 'bart_tokenizer' )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_dpr_tokenizer( self ):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
    def get_bart_tokenizer( self ):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config( self ):
        save_dir = os.path.join(self.tmpdirname , 'rag_tokenizer' )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer( self ):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
snake_case_ = tokenizer(snake_case )
self.assertIsNotNone(snake_case )
@slow
def a ( self ):
snake_case_ = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
snake_case_ = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
snake_case_ = tokenizer(snake_case )
self.assertIsNotNone(snake_case )
| 362
|
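# A minimal sketch (not part of the original test) of the fixture pattern the
# class above relies on: write a toy WordPiece vocab to a temp dir, then build
# a tokenizer from it. Tokens and paths here are illustrative only.
import os
import tempfile

from transformers import DPRQuestionEncoderTokenizer

tmpdir = tempfile.mkdtemp()
vocab_path = os.path.join(tmpdir, "vocab.txt")
with open(vocab_path, "w", encoding="utf-8") as f:
    f.write("\n".join(["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "low"]))
tok = DPRQuestionEncoderTokenizer(vocab_file=vocab_path)
print(tok.tokenize("low"))  # ['low']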
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the expanded key; spaces pass through unchanged."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt by adding the key offsets back (the inverse of cipher_text)."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 398
| 0
|
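# A round-trip sanity check for the cipher above (assumes the definitions from
# the previous snippet are in scope): decrypting the encryption recovers the
# message, and spaces pass through untouched.
msg = "HELLO WORLD"
expanded = generate_key(msg, "KEY")
encrypted = cipher_text(msg, expanded)
assert original_text(encrypted, expanded) == msg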
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort in place with alternating backward and forward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: pull the smallest remaining element to the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # forward pass: push the largest remaining element to the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 84
|
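# Quick check of cocktail_shaker_sort above: it sorts the list in place and
# returns it, shrinking the scanned window from both ends each full pass.
data = [4, 5, 2, 1, 2]
assert cocktail_shaker_sort(data) == [1, 2, 2, 4, 5]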
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2^p - 1 is prime (meaningful for prime p)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 84
| 1
|
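# Spot checks for lucas_lehmer_test above (the test is meaningful for prime p):
# 2^7 - 1 = 127 is prime, while 2^11 - 1 = 2047 = 23 * 89 is not.
assert lucas_lehmer_test(7) is True
assert lucas_lehmer_test(11) is False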
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 274
|
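# Illustration only (not transformers code): the _LazyModule above defers the
# actual import until first attribute access, much like a PEP 562 module-level
# __getattr__. A minimal sketch of the same idea:
import importlib

def __getattr__(name):
    if name == "MLukeTokenizer":
        module = importlib.import_module(".tokenization_mluke", __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")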
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows, centered."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build the triangle row by row, one element at a time."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build each row from the previous one, computing only the first half
    and mirroring it (Pascal's triangle is symmetric)."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Time both generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 334
| 0
|
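# The two generators above should agree row for row; a tiny cross-check:
assert generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5)
print_pascal_triangle(5)  # prints the first five rows, centered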
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __a( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('''foo.json''',)] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ : int = GenerationConfig(
do_sample=_SCREAMING_SNAKE_CASE ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_SCREAMING_SNAKE_CASE ,config_name=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = GenerationConfig.from_pretrained(_SCREAMING_SNAKE_CASE ,config_name=_SCREAMING_SNAKE_CASE )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample ,_SCREAMING_SNAKE_CASE )
self.assertEqual(loaded_config.temperature ,0.7 )
self.assertEqual(loaded_config.length_penalty ,1.0 )
self.assertEqual(loaded_config.bad_words_ids ,[[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k ,50 )
self.assertEqual(loaded_config.max_length ,20 )
self.assertEqual(loaded_config.max_time ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = AutoConfig.from_pretrained('''gpt2''' )
UpperCAmelCase_ : Tuple = GenerationConfig.from_model_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id ,default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id ,model_config.eos_token_id )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = GenerationConfig()
UpperCAmelCase_ : int = {
'''max_new_tokens''': 1_024,
'''foo''': '''bar''',
}
UpperCAmelCase_ : Dict = copy.deepcopy(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = generation_config.update(**_SCREAMING_SNAKE_CASE )
# update_kwargs was not modified (no side effects)
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens ,1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''foo''': '''bar'''} )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : str = GenerationConfig()
UpperCAmelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = GenerationConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo ,'''bar''' )
UpperCAmelCase_ : List[str] = GenerationConfig.from_model_config(_SCREAMING_SNAKE_CASE )
assert not hasattr(_SCREAMING_SNAKE_CASE ,'''foo''' ) # no new kwargs should be initialized if from config
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Tuple = GenerationConfig()
self.assertEqual(default_config.temperature ,1.0 )
self.assertEqual(default_config.do_sample ,_SCREAMING_SNAKE_CASE )
self.assertEqual(default_config.num_beams ,1 )
UpperCAmelCase_ : List[Any] = GenerationConfig(
do_sample=_SCREAMING_SNAKE_CASE ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
self.assertEqual(config.temperature ,0.7 )
self.assertEqual(config.do_sample ,_SCREAMING_SNAKE_CASE )
self.assertEqual(config.num_beams ,1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = GenerationConfig.from_pretrained(_SCREAMING_SNAKE_CASE ,temperature=1.0 )
self.assertEqual(loaded_config.temperature ,1.0 )
self.assertEqual(loaded_config.do_sample ,_SCREAMING_SNAKE_CASE )
self.assertEqual(loaded_config.num_beams ,1 ) # default value
@is_staging_test
class __a( unittest.TestCase ):
"""simple docstring"""
@classmethod
def a__ ( cls ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def a__ ( cls ) -> Any:
try:
delete_repo(token=cls._token ,repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def a__ ( self ) -> int:
UpperCAmelCase_ : Tuple = GenerationConfig(
do_sample=_SCREAMING_SNAKE_CASE ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('''test-generation-config''' ,use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE ,getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_SCREAMING_SNAKE_CASE ,repo_id='''test-generation-config''' ,push_to_hub=_SCREAMING_SNAKE_CASE ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE ,getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Tuple = GenerationConfig(
do_sample=_SCREAMING_SNAKE_CASE ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('''valid_org/test-generation-config-org''' ,use_auth_token=self._token )
UpperCAmelCase_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE ,getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_SCREAMING_SNAKE_CASE ,repo_id='''valid_org/test-generation-config-org''' ,push_to_hub=_SCREAMING_SNAKE_CASE ,use_auth_token=self._token )
UpperCAmelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_SCREAMING_SNAKE_CASE ,getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
| 703
|
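# A minimal sketch of the save/load round trip the tests above exercise;
# GenerationConfig and its methods are real transformers API, the directory is
# just a throwaway temp dir.
import tempfile

from transformers import GenerationConfig

cfg = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    cfg.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
assert loaded.do_sample is True and loaded.temperature == 0.7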
from math import ceil
def solution(n=1001):
    """Sum of the numbers on the diagonals of an n x n number spiral
    (Project Euler problem 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
        n = int(sys.argv[1])
        print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 300
| 0
|
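# For a 5x5 spiral the diagonal values are 1, 3, 5, 7, 9, 13, 17, 21, 25,
# which sum to 101 — matching the closed-form update in solution() above.
assert solution(5) == 101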
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # pad the shorter coefficient list with zeros so the lists line up
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # discrete convolution of the two coefficient lists
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    # seed a dual number x + 1*E1; derivatives fall out of the Taylor coefficients
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
| 40
|
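# Reading derivatives off the Dual class above: for f(y) = y**6 we have
# f'(9) = 6 * 9**5 and f''(9) = 30 * 9**4; differentiate() recovers both from
# the Taylor coefficients (duals[order - 1] * order!).
assert differentiate(lambda y: y**6, 9, 1) == 6 * 9**5
assert differentiate(lambda y: y**6, 9, 2) == 30 * 9**4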
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __A (unittest.TestCase ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=4_00 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=0.9 , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , ):
__UpperCAmelCase : Tuple = size if size is not None else {"shortest_edge": 30}
__UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else {"height": 30, "width": 30}
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = min_resolution
__UpperCAmelCase : Union[str, Any] = max_resolution
__UpperCAmelCase : Optional[int] = do_resize_and_center_crop
__UpperCAmelCase : Any = size
__UpperCAmelCase : Dict = crop_pct
__UpperCAmelCase : Optional[Any] = crop_size
__UpperCAmelCase : Optional[int] = do_normalize
__UpperCAmelCase : Union[str, Any] = image_mean
__UpperCAmelCase : List[str] = image_std
def _snake_case ( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __A (__magic_name__ , unittest.TestCase ):
snake_case :Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def _snake_case ( self ):
__UpperCAmelCase : str = PoolFormerImageProcessingTester(self )
@property
def _snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "size" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "crop_pct" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "image_std" ) )
def _snake_case ( self ):
__UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
__UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# Initialize image_processing
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[Any] = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _snake_case ( self ):
# Initialize image_processing
__UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
__UpperCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _snake_case ( self ):
# Initialize image_processing
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__UpperCAmelCase : int = image_processing(UpperCamelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 168
| 0
|
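# A small usage sketch of the processor under test above; the size/crop values
# mirror the tester defaults and the random image is illustrative.
import numpy as np
from PIL import Image

from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
image = Image.fromarray((np.random.rand(64, 64, 3) * 255).astype(np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 30, 30])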
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase_ :
def __init__( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase__ = question_encoder
lowerCAmelCase__ = generator
lowerCAmelCase__ = self.question_encoder
def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Dict ):
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , '''question_encoder_tokenizer''' )
lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(SCREAMING_SNAKE_CASE_ )
self.generator.save_pretrained(SCREAMING_SNAKE_CASE_ )
@classmethod
def __snake_case ( cls : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCAmelCase__ = kwargs.pop('''config''' , SCREAMING_SNAKE_CASE_ )
if config is None:
lowerCAmelCase__ = RagConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE_ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE_ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ )
def __call__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
return self.current_tokenizer(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return self.generator.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ):
return self.generator.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = self.question_encoder
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = self.generator
def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : str = "longest" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : bool = True , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , SCREAMING_SNAKE_CASE_ , )
if max_length is None:
lowerCAmelCase__ = self.current_tokenizer.model_max_length
lowerCAmelCase__ = self(
SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCAmelCase__ = self.current_tokenizer.model_max_length
lowerCAmelCase__ = self(
text_target=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ = labels['''input_ids''']
return model_inputs
| 288
|
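# A hedged usage sketch of the wrapper above: __call__ is routed through
# current_tokenizer (the question encoder by default), while batch_decode and
# decode always go to the generator side. Fetches the real rag-token-nq checkpoint.
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
inputs = tokenizer(["who wrote hamlet"], return_tensors="pt")
print(inputs.input_ids.shape)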
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_UpperCAmelCase : Dict = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_UpperCAmelCase : Tuple = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    """Map every byte to a printable unicode character (GPT-2 style), so BPE
    can operate on arbitrary bytes without control characters."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCAmelCase_ ( snake_case__ ):
UpperCamelCase_ :List[str] = VOCAB_FILES_NAMES
UpperCamelCase_ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ :str = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int="replace" , SCREAMING_SNAKE_CASE_ : Tuple="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE_ : Any="<unk>" , SCREAMING_SNAKE_CASE_ : int="<pad>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<mask>" , SCREAMING_SNAKE_CASE_ : Tuple=False , **SCREAMING_SNAKE_CASE_ : Dict , ):
lowerCAmelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
lowerCAmelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
lowerCAmelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
lowerCAmelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
lowerCAmelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
lowerCAmelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
lowerCAmelCase__ = errors # how to handle errors in decoding
lowerCAmelCase__ = bytes_to_unicode()
lowerCAmelCase__ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase__ = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase__ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCAmelCase__ = {}
lowerCAmelCase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase__ = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __snake_case ( self : List[str] ):
return len(self.encoder )
def __snake_case ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCAmelCase__ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ = bigram
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCAmelCase__ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = ''' '''.join(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = word
return word
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ):
lowerCAmelCase__ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) )
return bpe_tokens
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] ):
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ):
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase__ = ''''''.join(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
lowerCAmelCase__ = 0
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase__ = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase__ = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
lowerCAmelCase__ = ''' ''' + text
return (text, kwargs)
| 288
| 1
|
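# The helpers above in action (using the fixed names that the tokenizer class
# calls): bytes_to_unicode() is a 256-entry byte-to-printable-character table,
# and get_pairs() yields the adjacent symbol bigrams that BPE ranks and merges.
assert len(bytes_to_unicode()) == 256
print(get_pairs(tuple("hello")))  # a set: {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}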
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the matrix is the second signal rotated by i positions
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 264
|
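# Running the class above on its built-in signals: the circular convolution of
# [2, 1, 2, -1] with [1, 2, 3, 4] is [10, 10, 6, 14].
convolver = CircularConvolution()
print(convolver.circular_convolution())  # [10.0, 10.0, 6.0, 14.0]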
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 385
| 0
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 143
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 143
| 1
|
"""simple docstring"""
import math
def res(x, y):
    """Score x**y by its logarithm so huge powers never have to be computed."""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 353
|
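# Why the log trick above works: log10 is monotone and log10(x**y) = y*log10(x),
# so comparing y * log10(x) compares the powers without computing them.
# Example: 2**10 = 1024 beats 10**3 = 1000.
import math

assert 10 * math.log10(2) > 3 * math.log10(10)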
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A__ : List[Any] = random.Random()
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase=1.0 , _UpperCamelCase=None , _UpperCamelCase=None ):
"""simple docstring"""
if rng is None:
_lowercase: Any = global_rng
_lowercase: List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __magic_name__ ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> List[Any]:
"""simple docstring"""
_lowercase: str = parent
_lowercase: int = batch_size
_lowercase: Tuple = min_seq_length
_lowercase: Any = max_seq_length
_lowercase: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowercase: Any = padding_value
_lowercase: List[str] = sampling_rate
_lowercase: Union[str, Any] = return_attention_mask
_lowercase: Optional[Any] = do_normalize
_lowercase: List[str] = feature_size
_lowercase: Optional[Any] = chunk_length
_lowercase: str = hop_length
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self , A_=False , A_=False ) -> List[Any]:
"""simple docstring"""
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
_lowercase: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowercase: Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowercase: str = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __magic_name__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
UpperCamelCase_ = WhisperFeatureExtractor if is_speech_available() else None
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: int = WhisperFeatureExtractionTester(self )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase: Any = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
_lowercase: str = self.feature_extraction_class.from_pretrained(A_ )
_lowercase: Dict = feat_extract_first.to_dict()
_lowercase: List[str] = feat_extract_second.to_dict()
_lowercase: List[str] = feat_extract_first.mel_filters
_lowercase: str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase: Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(A_ )
_lowercase: Tuple = self.feature_extraction_class.from_json_file(A_ )
_lowercase: int = feat_extract_first.to_dict()
_lowercase: Optional[int] = feat_extract_second.to_dict()
_lowercase: Tuple = feat_extract_first.mel_filters
_lowercase: Optional[int] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowercase: Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowercase: List[str] = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
_lowercase: Dict = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowercase: Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowercase: Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
_lowercase: Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
_lowercase: List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowercase: Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowercase: Any = np.asarray(A_ )
_lowercase: Tuple = feature_extractor(A_ , return_tensors='''np''' ).input_features
_lowercase: Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
_lowercase: List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_lowercase: List[Any] = [np.asarray(A_ ) for speech_input in speech_inputs]
_lowercase: Any = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowercase: str = [np.asarray(A_ ) for speech_input in speech_inputs_truncated]
_lowercase: Optional[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
_lowercase: List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            # padding should always return float32 features, whatever the input dtype
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
def lowercase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowercase: Optional[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowercase: List[str] = ds.sort('''id''' ).select(range(A_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
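
# Usage sketch (added for illustration; not part of the original module). Assuming
# the classes above are importable, a default config can be built and inspected:
#
#     config = SegformerConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2])
#     print(config.model_type)    # "segformer"
#     print(config.hidden_sizes)  # [32, 64, 160, 256]
#     onnx_config = SegformerOnnxConfig(config)
#     print(dict(onnx_config.inputs))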
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
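
# Sanity-check sketch (added; the folder path is hypothetical): after conversion,
# the saved state dict should expose the remapped key and no longer the old one.
#
#     state_dict = torch.load(os.path.join("./DialoGPT-small", WEIGHTS_NAME))
#     assert NEW_KEY in state_dict and OLD_KEY not in state_dict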
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # merge the three sub-vocabularies into one mapping
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        # only lyrics are tokenized, character by character
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input text: lowercase, accepted characters only, underscores collapsed."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)

    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
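
# Usage sketch (added; the checkpoint id is a public example, not mandated by this file):
#
#     from diffusers import DDIMPipeline
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")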
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a list [xmin, ymin, xmax, ymax] into a dict with the same keys."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
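
# Usage sketch (added): this class is normally reached through the `pipeline`
# factory; the checkpoint and image URL below are common public examples, not
# requirements of this file.
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     print(detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9))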
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self) -> int:
        return math.prod(self.conv_stride)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and a vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
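
# Added illustration: for a Hermitian matrix the Rayleigh quotient is bounded by
# the extreme eigenvalues, which is easy to verify numerically.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    m = rng.random((3, 3))
    h = m + m.T  # a real symmetric matrix is Hermitian
    vec = rng.random((3, 1))
    quotient = rayleigh_quotient(h, vec).real.item()
    eigenvalues = np.linalg.eigvalsh(h)
    assert eigenvalues[0] - 1e-9 <= quotient <= eigenvalues[-1] + 1e-9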
"""simple docstring"""
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    print("moving disk from", fp, "to", tp)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
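
# Added illustration (the helper below is not in the original file): solving a
# tower of height n always takes 2**n - 1 disk moves.
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1


if __name__ == "__main__":
    for n in range(1, 5):
        assert count_moves(n) == 2**n - 1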
"""simple docstring"""
def solution(n: int = 1000000) -> int:
    """Returns the starting number under n that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
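
# Added illustration (helper name is ours, not from the original file): the chain
# length that `solution` memoises in `counters`, computed directly for one value.
def collatz_chain_length(number: int) -> int:
    length = 1
    while number != 1:
        number = number // 2 if number % 2 == 0 else 3 * number + 1
        length += 1
    return length


if __name__ == "__main__":
    assert collatz_chain_length(13) == 10  # 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1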
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
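
# Usage sketch (added; the menu below is illustrative): maximise value greedily
# under a weight budget of 40.
if __name__ == "__main__":
    foods = build_menu(["apple", "bread", "cheese"], [50, 30, 90], [10, 20, 30])
    taken, total = greedy(foods, 40.0, Things.get_value)
    print(taken, total)  # greedy by value picks cheese (30) then apple (10) -> 140.0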
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two data points."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each query row, finds the nearest vector in the dataset and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two data points."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
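
# Usage sketch (added; the data is illustrative): nearest neighbour per query row,
# plus a cosine-similarity spot check on parallel vectors.
if __name__ == "__main__":
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.9, 1.1]])
    print(similarity_search(dataset, value_array))  # [[[1.0, 1.0], ~0.1414]]
    print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # ~1.0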
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
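
# Added consistency check: since PV = nRT, converting the computed pressure back
# to a volume must recover the input.
if __name__ == "__main__":
    moles, kelvin, volume = 2.0, 300.0, 0.05
    pressure = pressure_of_gas_system(moles, kelvin, volume)
    assert abs(volume_of_gas_system(moles, kelvin, pressure) - volume) < 1e-12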
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    """Test cases for the greedy knapsack."""

    def test_sorted(self):
        """kp.calc_profit takes (profit, weight, max_weight) and the answer should
        match the expected value."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence
        # classification and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
def jaro_winkler(str1: str, str2: str) -> float:
    """Computes the Jaro-Winkler similarity between two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
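
# Added examples: identical strings score 1.0 and fully disjoint strings score 0.0.
if __name__ == "__main__":
    assert jaro_winkler("hello", "hello") == 1.0
    assert jaro_winkler("abc", "xyz") == 0.0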
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __a ( __UpperCamelCase ):
def A ( self : Dict , UpperCAmelCase : str ):
with open(UpperCAmelCase , encoding="""utf-8""" ) as input_file:
lowerCAmelCase_ : Dict = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
lowerCAmelCase_ : int = input_file.read()
lowerCAmelCase_ : Dict = regexp.search(UpperCAmelCase )
return match
def A ( self : List[str] , UpperCAmelCase : str ):
with open(UpperCAmelCase , encoding="""utf-8""" ) as input_file:
lowerCAmelCase_ : Tuple = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
lowerCAmelCase_ : Any = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
lowerCAmelCase_ : List[Any] = regexp.finditer(UpperCAmelCase )
lowerCAmelCase_ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def A ( self : Dict ):
lowerCAmelCase_ : Dict = Path("""./datasets""" )
lowerCAmelCase_ : Union[str, Any] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCAmelCase ) ):
raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}' )
def A ( self : Tuple ):
lowerCAmelCase_ : str = Path("""./datasets""" )
lowerCAmelCase_ : List[Any] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCAmelCase ) ):
raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.' )
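# Illustrative, self-contained check of the encoding regex above (assumed
# in-memory strings instead of real dataset files):
import re
pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
bad = " data = open('file.txt')"
good = " data = open('file.txt', encoding='utf-8')"
assert pattern.search(bad) is not None   # flagged: no encoding argument
assert pattern.search(good) is None      # accepted: utf-8 encoding given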
| 600
|
def __UpperCamelCase ( lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> float:
'''simple docstring'''
lowerCAmelCase_ : Tuple = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
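# Quick sanity check of the closed form above (assumed values): the sum
# 1 + 2 + ... + 10 via n/2 * (2a + (n - 1)d) with a=1, d=1, n=10.
assert (10 / 2) * (2 * 1 + (10 - 1) * 1) == 55.0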
| 600
| 1
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__UpperCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class a_( datasets.BuilderConfig ):
"""simple docstring"""
__snake_case : Optional[datasets.Features] =None
__snake_case : str ="utf-8"
__snake_case : Optional[str] =None
__snake_case : Optional[str] =None
__snake_case : bool =True # deprecated
__snake_case : Optional[int] =None # deprecated
__snake_case : int =10 << 20 # 10MB
__snake_case : Optional[bool] =None
class a_( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__snake_case : List[Any] =JsonConfig
def __UpperCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
SCREAMING_SNAKE_CASE = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
return datasets.DatasetInfo(features=self.config.features)
def __UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files , (str, list, tuple)):
SCREAMING_SNAKE_CASE = data_files
if isinstance(files , str):
SCREAMING_SNAKE_CASE = [files]
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCAmelCase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(files , str):
SCREAMING_SNAKE_CASE = [files]
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCAmelCase__) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={'files': files}))
return splits
def __UpperCamelCase ( self : List[str] , lowerCAmelCase__ : pa.Table) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
SCREAMING_SNAKE_CASE = self.config.features.arrow_schema.field(lowerCAmelCase__).type
SCREAMING_SNAKE_CASE = pa_table.append_column(lowerCAmelCase__ , pa.array([None] * len(lowerCAmelCase__) , type=lowerCAmelCase__))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE = table_cast(lowerCAmelCase__ , self.config.features.arrow_schema)
return pa_table
def __UpperCamelCase ( self : Dict , lowerCAmelCase__ : Optional[int]) -> str:
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowerCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase__)
# We keep only the field we are interested in
SCREAMING_SNAKE_CASE = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(lowerCAmelCase__ , (list, tuple)):
SCREAMING_SNAKE_CASE = set().union(*[row.keys() for row in dataset])
SCREAMING_SNAKE_CASE = {col: [row.get(lowerCAmelCase__) for row in dataset] for col in keys}
else:
SCREAMING_SNAKE_CASE = dataset
SCREAMING_SNAKE_CASE = pa.Table.from_pydict(lowerCAmelCase__)
yield file_idx, self._cast_table(lowerCAmelCase__)
# If the file has one json object per line
else:
with open(lowerCAmelCase__ , 'rb') as f:
SCREAMING_SNAKE_CASE = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
SCREAMING_SNAKE_CASE = max(self.config.chunksize // 3_2 , 1_6 << 1_0)
SCREAMING_SNAKE_CASE = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
SCREAMING_SNAKE_CASE = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowerCAmelCase__)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
SCREAMING_SNAKE_CASE = batch.decode(self.config.encoding , errors=lowerCAmelCase__).encode('utf-8')
try:
while True:
try:
SCREAMING_SNAKE_CASE = paj.read_json(
io.BytesIO(lowerCAmelCase__) , read_options=paj.ReadOptions(block_size=lowerCAmelCase__))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowerCAmelCase__ , pa.ArrowInvalid)
and "straddling" not in str(lowerCAmelCase__)
or block_size > len(lowerCAmelCase__)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'''Batch of {len(lowerCAmelCase__)} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''')
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowerCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase__)
except json.JSONDecodeError:
logger.error(f'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__)}: {e}''')
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(dataset , list): # list is the only sequence type supported in JSON
try:
SCREAMING_SNAKE_CASE = set().union(*[row.keys() for row in dataset])
SCREAMING_SNAKE_CASE = {col: [row.get(lowerCAmelCase__) for row in dataset] for col in keys}
SCREAMING_SNAKE_CASE = pa.Table.from_pydict(lowerCAmelCase__)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__)}: {e}''')
raise ValueError(f'''Not able to read records in the JSON file at {file}.''') from None
yield file_idx, self._cast_table(lowerCAmelCase__)
break
else:
logger.error(f'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__)}: {e}''')
raise ValueError(
f'''Not able to read records in the JSON file at {file}. '''
f'''You should probably indicate the field of the JSON file containing your records. '''
f'''This JSON file contain the following fields: {str(list(dataset.keys()))}. '''
f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''') from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__)
batch_idx += 1
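# Illustrative, self-contained sketch of the core parsing step above (assumed
# toy bytes and block size): feeding a chunk of newline-delimited JSON to
# pyarrow's JSON reader.
import io
import pyarrow.json as paj
batch = b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n'
table = paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=16 << 10))
assert table.num_rows == 2 and table.column_names == ["a", "b"]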
| 259
|
import random
def A_ ( lowercase_ ) ->bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE = num - 1
SCREAMING_SNAKE_CASE = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE = pow(lowercase_ , lowercase_ , lowercase_ )
if v != 1:
SCREAMING_SNAKE_CASE = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE = i + 1
SCREAMING_SNAKE_CASE = (v**2) % num
return True
def A_ ( lowercase_ ) ->bool:
"""simple docstring"""
if num < 2:
return False
SCREAMING_SNAKE_CASE = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowercase_ )
def A_ ( lowercase_ = 1_0_2_4 ) ->int:
"""simple docstring"""
while True:
SCREAMING_SNAKE_CASE = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowercase_ ):
return num
if __name__ == "__main__":
__UpperCAmelCase = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 259
| 1
|
"""simple docstring"""
import os
import sys
import unittest
A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A = os.path.join(git_repo_path, 'src', 'transformers')
A = '\n{0} = None\n'
A = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
A = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _a ( unittest.TestCase):
def __lowercase ( self : Optional[int] ) -> Tuple:
snake_case : str = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
self.assertIsNone(_lowercase )
snake_case : Tuple = find_backend(" if not is_tokenizers_available():" )
self.assertEqual(_lowercase , "tokenizers" )
snake_case : Tuple = find_backend(" if not is_tensorflow_text_available():" )
self.assertEqual(_lowercase , "tensorflow_text" )
snake_case : Any = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" )
self.assertEqual(_lowercase , "sentencepiece_and_tokenizers" )
snake_case : Optional[int] = find_backend(
" if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
self.assertEqual(_lowercase , "sentencepiece_and_tensorflow_text" )
snake_case : Tuple = find_backend(
" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
self.assertEqual(_lowercase , "sentencepiece_and_tokenizers_and_vision" )
def __lowercase ( self : List[str] ) -> List[str]:
snake_case : Dict = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , _lowercase )
self.assertIn("tensorflow_text" , _lowercase )
self.assertIn("sentencepiece_and_tokenizers" , _lowercase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("BertModel" , objects["torch"] )
self.assertIn("TFBertModel" , objects["tf"] )
self.assertIn("FlaxBertModel" , objects["flax"] )
self.assertIn("BertModel" , objects["torch"] )
self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] )
self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] )
def __lowercase ( self : List[str] ) -> List[Any]:
snake_case : Tuple = create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(_lowercase , "\nCONSTANT = None\n" )
snake_case : Dict = create_dummy_object("function" , "'torch'" )
self.assertEqual(
_lowercase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
snake_case : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
snake_case : Union[str, Any] = create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(_lowercase , _lowercase )
def __lowercase ( self : List[str] ) -> Dict:
snake_case : int = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
snake_case : List[str] = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , _lowercase )
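# Illustrative use of the templates above (assumed names): filling in an
# object name, exactly as the dummy-file generator is expected to do.
DUMMY_CONSTANT = "\n{0} = None\n"
assert DUMMY_CONSTANT.format("CONSTANT") == "\nCONSTANT = None\n"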
| 449
|
"""simple docstring"""
from random import randint, random
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: int , lowerCamelCase_: int , lowerCamelCase_: int , lowerCamelCase_: bool = False , lowerCamelCase_: bool = False , lowerCamelCase_: int = 5 , ):
"""simple docstring"""
snake_case : str = [[-1] * number_of_cells] # Create a highway without any cars
snake_case : str = 0
snake_case : Any = max(lowerCamelCase_ , 0 )
while i < number_of_cells:
snake_case : Optional[int] = (
randint(0 , lowerCamelCase_ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: list , lowerCamelCase_: int ):
"""simple docstring"""
snake_case : Any = 0
snake_case : int = highway_now[car_index + 1 :]
for cell in range(len(lowerCamelCase_ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCamelCase_ , -1 )
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: list , lowerCamelCase_: float , lowerCamelCase_: int ):
"""simple docstring"""
snake_case : List[str] = len(lowerCamelCase_ )
# Before calculations, the highway is empty
snake_case : Dict = [-1] * number_of_cells
for car_index in range(lowerCamelCase_ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
snake_case : Tuple = min(highway_now[car_index] + 1 , lowerCamelCase_ )
# Number of empty cells before the next car
snake_case : int = get_distance(lowerCamelCase_ , lowerCamelCase_ ) - 1
# We can't have the car causing an accident
snake_case : str = min(next_highway[car_index] , lowerCamelCase_ )
if random() < probability:
# Randomly, a driver will slow down
snake_case : Optional[Any] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: list , lowerCamelCase_: int , lowerCamelCase_: float , lowerCamelCase_: int ):
"""simple docstring"""
snake_case : Optional[Any] = len(highway[0] )
for i in range(lowerCamelCase_ ):
snake_case : Union[str, Any] = update(highway[i] , lowerCamelCase_ , lowerCamelCase_ )
snake_case : Optional[int] = [-1] * number_of_cells
for car_index in range(lowerCamelCase_ ):
snake_case : List[Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
snake_case : Optional[Any] = (car_index + speed) % number_of_cells
# Commit the change of position
snake_case : Tuple = speed
highway.append(lowerCamelCase_ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
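# Illustrative, self-contained sketch of the gap computation used above
# (assumed toy highway): distance to the next occupied cell on a circular road.
def gap_ahead(highway_now: list, car_index: int) -> int:
    n = len(highway_now)
    distance = 0
    for step in range(1, n):
        if highway_now[(car_index + step) % n] != -1:
            return distance
        distance += 1
    return distance
assert gap_ahead([2, -1, -1, 1, -1], 0) == 2  # two empty cells before the car at index 3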
| 449
| 1
|
from math import sqrt
def lowercase__ ( _UpperCamelCase = 1_00_00_00) -> int:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
num_cuboids += (
min(_UpperCamelCase , sum_shortest_sides // 2)
- max(1 , sum_shortest_sides - max_cuboid_size)
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F'{solution() = }')
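# Illustrative check of the counting condition above (assumed classic example):
# for an a x b x c cuboid with a <= b <= c, the shortest surface path is
# sqrt((a + b)**2 + c**2); for 6 x 5 x 3 this is exactly 10.
from math import sqrt
assert sqrt((5 + 3) ** 2 + 6 ** 2) == 10.0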
| 721
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__magic_name__ : Optional[Any] = logging.get_logger(__name__)
__magic_name__ : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ : str = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
__magic_name__ : Any = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
__magic_name__ : Any = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class A__ ( __snake_case ):
'''simple docstring'''
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = RealmTokenizer
def __init__( self : str , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Optional[Any]=True , _SCREAMING_SNAKE_CASE : Tuple="[UNK]" , _SCREAMING_SNAKE_CASE : Optional[int]="[SEP]" , _SCREAMING_SNAKE_CASE : Dict="[PAD]" , _SCREAMING_SNAKE_CASE : Any="[CLS]" , _SCREAMING_SNAKE_CASE : int="[MASK]" , _SCREAMING_SNAKE_CASE : int=True , _SCREAMING_SNAKE_CASE : List[str]=None , **_SCREAMING_SNAKE_CASE : int , ):
"""simple docstring"""
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('strip_accents' , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop('type' ) )
UpperCamelCase = do_lower_case
UpperCamelCase = strip_accents
UpperCamelCase = tokenize_chinese_chars
UpperCamelCase = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = do_lower_case
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
UpperCamelCase = PaddingStrategy.MAX_LENGTH
UpperCamelCase = text
UpperCamelCase = kwargs.pop('text_pair' , _SCREAMING_SNAKE_CASE )
UpperCamelCase = kwargs.pop('return_tensors' , _SCREAMING_SNAKE_CASE )
UpperCamelCase = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(_SCREAMING_SNAKE_CASE ):
if batch_text_pair is not None:
UpperCamelCase = batch_text_pair[idx]
else:
UpperCamelCase = None
UpperCamelCase = super().__call__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = encoded_candidates.get('input_ids' )
UpperCamelCase = encoded_candidates.get('attention_mask' )
UpperCamelCase = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_SCREAMING_SNAKE_CASE )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_SCREAMING_SNAKE_CASE )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = {key: item for key, item in output_data.items() if len(_SCREAMING_SNAKE_CASE ) != 0}
return BatchEncoding(_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self : int , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE ( self : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
UpperCamelCase = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
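# Illustrative shape of the sentence-pair token type ids built above (assumed
# toy ids, no tokenizer download needed): zeros cover [CLS] A [SEP], ones
# cover B [SEP].
token_ids_a, token_ids_b = [10, 11], [20]
cls_id, sep_id = 101, 102
mask = [0] * len([cls_id] + token_ids_a + [sep_id]) + [1] * len(token_ids_b + [sep_id])
assert mask == [0, 0, 0, 0, 1, 1]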
| 410
| 0
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase__ : List[Any] = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase( _A : Any , _A : int , _A : str ):
'''simple docstring'''
hf_model.apply_weight_norm()
UpperCAmelCase__ : Dict = checkpoint['''input_conv.weight_g''']
UpperCAmelCase__ : List[Any] = checkpoint['''input_conv.weight_v''']
UpperCAmelCase__ : Union[str, Any] = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase__ : Union[str, Any] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCAmelCase__ : int = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCAmelCase__ : Dict = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase__ : Union[str, Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCAmelCase__ : Optional[int] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCAmelCase__ : Dict = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCAmelCase__ : List[Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCAmelCase__ : Dict = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCAmelCase__ : str = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCAmelCase__ : Union[str, Any] = checkpoint['''output_conv.1.weight_g''']
UpperCAmelCase__ : Optional[int] = checkpoint['''output_conv.1.weight_v''']
UpperCAmelCase__ : List[str] = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCamelCase( _A : str , _A : Any , _A : Dict , _A : Dict=None , _A : str=None , ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ : Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(_A )
else:
UpperCAmelCase__ : Union[str, Any] = SpeechTaHifiGanConfig()
UpperCAmelCase__ : Tuple = SpeechTaHifiGan(_A )
UpperCAmelCase__ : Optional[int] = torch.load(_A )
load_weights(orig_checkpoint['''model''']['''generator'''] , _A , _A )
UpperCAmelCase__ : Tuple = np.load(_A )
UpperCAmelCase__ : Dict = stats[0].reshape(-1 )
UpperCAmelCase__ : str = stats[1].reshape(-1 )
UpperCAmelCase__ : int = torch.from_numpy(_A ).float()
UpperCAmelCase__ : int = torch.from_numpy(_A ).float()
model.save_pretrained(_A )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(_A )
if __name__ == "__main__":
UpperCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
UpperCamelCase__ : str = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
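# Minimal illustration of the weight-norm round trip used above (assumed toy
# layer; the real script applies it to the whole HiFi-GAN generator).
import torch
from torch.nn.utils import weight_norm, remove_weight_norm
conv = weight_norm(torch.nn.Conv1d(4, 4, kernel_size=3))  # splits weight into weight_g / weight_v
assert hasattr(conv, "weight_g") and hasattr(conv, "weight_v")
remove_weight_norm(conv)  # folds them back into a single weight tensor
assert not hasattr(conv, "weight_g")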
| 614
|
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase( _A : list[int] , _A : int , _A : int , _A : int ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = array[indexa], array[indexa]
def __UpperCamelCase( _A : list[int] , _A : int , _A : int , _A : int ):
'''simple docstring'''
if length > 1:
UpperCAmelCase__ : List[Any] = int(length / 2 )
for i in range(_A , low + middle ):
comp_and_swap(_A , _A , i + middle , _A )
bitonic_merge(_A , _A , _A , _A )
bitonic_merge(_A , low + middle , _A , _A )
def __UpperCamelCase( _A : list[int] , _A : int , _A : int , _A : int ):
'''simple docstring'''
if length > 1:
UpperCAmelCase__ : Optional[int] = int(length / 2 )
bitonic_sort(_A , _A , _A , 1 )
bitonic_sort(_A , low + middle , _A , 0 )
bitonic_merge(_A , _A , _A , _A )
if __name__ == "__main__":
UpperCamelCase__ : Dict = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase__ : Tuple = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
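# Self-contained illustration of the bitonic compare-and-swap step above
# (assumed standalone name): direction 1 orders ascending, 0 descending.
def comp_and_swap_demo(array: list, i: int, j: int, direction: int) -> None:
    if (direction == 1 and array[i] > array[j]) or (direction == 0 and array[i] < array[j]):
        array[i], array[j] = array[j], array[i]
data = [4, 1]
comp_and_swap_demo(data, 0, 1, direction=1)
assert data == [1, 4]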
| 614
| 1
|
"""simple docstring"""
a : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a : str = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a : Union[str, Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 85
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = ["image_processor", "tokenizer"]
__lowerCamelCase = "AutoImageProcessor"
__lowerCamelCase = "AutoTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.image_processor
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowercase__ : Any= image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 85
| 1
|
from __future__ import annotations
from PIL import Image
# Define glider example
__snake_case = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__snake_case = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _lowercase ( UpperCamelCase_ ) -> list[list[int]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for i in range(len(UpperCamelCase_ ) ):
SCREAMING_SNAKE_CASE__ = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
SCREAMING_SNAKE_CASE__ = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(UpperCamelCase_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(UpperCamelCase_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(UpperCamelCase_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
SCREAMING_SNAKE_CASE__ = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(UpperCamelCase_ )
return next_generation
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> list[Image.Image]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for _ in range(UpperCamelCase_ ):
# Create output image
SCREAMING_SNAKE_CASE__ = Image.new('RGB' , (len(cells[0] ), len(UpperCamelCase_ )) )
SCREAMING_SNAKE_CASE__ = img.load()
# Save cells to image
for x in range(len(UpperCamelCase_ ) ):
for y in range(len(cells[0] ) ):
SCREAMING_SNAKE_CASE__ = 255 - cells[y][x] * 255
SCREAMING_SNAKE_CASE__ = (colour, colour, colour)
# Save image
images.append(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = new_generation(UpperCamelCase_ )
return images
if __name__ == "__main__":
__snake_case = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
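# Illustrative check of the update rule above (assumed 3x3 blinker): the
# centre cell has exactly two live neighbours, so rule 1 keeps it alive.
blinker_demo = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
live_neighbours = sum(blinker_demo[i][j] for i in range(3) for j in range(3)) - blinker_demo[1][1]
assert live_neighbours == 2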
| 472
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
SCREAMING_SNAKE_CASE__ = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ = value
else:
SCREAMING_SNAKE_CASE__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE__ = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
SCREAMING_SNAKE_CASE__ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ = name.split(UpperCamelCase_ )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE__ = mapped_key.replace('*' , UpperCamelCase_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ = 'weight_v'
elif "weight" in name:
SCREAMING_SNAKE_CASE__ = 'weight'
elif "bias" in name:
SCREAMING_SNAKE_CASE__ = 'bias'
else:
SCREAMING_SNAKE_CASE__ = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F'Unused weights: {unused_weights}' )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE__ = name.split('.' )
SCREAMING_SNAKE_CASE__ = int(items[0] )
SCREAMING_SNAKE_CASE__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = SEWConfig()
if is_finetuned:
SCREAMING_SNAKE_CASE__ = model.wav_encoder.wav_model.cfg
else:
SCREAMING_SNAKE_CASE__ = model.cfg
SCREAMING_SNAKE_CASE__ = fs_config.conv_bias
SCREAMING_SNAKE_CASE__ = eval(fs_config.conv_feature_layers )
SCREAMING_SNAKE_CASE__ = [x[0] for x in conv_layers]
SCREAMING_SNAKE_CASE__ = [x[1] for x in conv_layers]
SCREAMING_SNAKE_CASE__ = [x[2] for x in conv_layers]
SCREAMING_SNAKE_CASE__ = 'gelu'
SCREAMING_SNAKE_CASE__ = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = fs_config.activation_fn.name
SCREAMING_SNAKE_CASE__ = fs_config.encoder_embed_dim
SCREAMING_SNAKE_CASE__ = 0.02
SCREAMING_SNAKE_CASE__ = fs_config.encoder_ffn_embed_dim
SCREAMING_SNAKE_CASE__ = 1e-5
SCREAMING_SNAKE_CASE__ = fs_config.encoder_layerdrop
SCREAMING_SNAKE_CASE__ = fs_config.encoder_attention_heads
SCREAMING_SNAKE_CASE__ = fs_config.conv_pos_groups
SCREAMING_SNAKE_CASE__ = fs_config.conv_pos
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = fs_config.encoder_layers
SCREAMING_SNAKE_CASE__ = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
SCREAMING_SNAKE_CASE__ = model.cfg
SCREAMING_SNAKE_CASE__ = fs_config.final_dropout
SCREAMING_SNAKE_CASE__ = fs_config.layerdrop
SCREAMING_SNAKE_CASE__ = fs_config.activation_dropout
SCREAMING_SNAKE_CASE__ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
SCREAMING_SNAKE_CASE__ = fs_config.attention_dropout
SCREAMING_SNAKE_CASE__ = fs_config.dropout_input
SCREAMING_SNAKE_CASE__ = fs_config.dropout
SCREAMING_SNAKE_CASE__ = fs_config.mask_channel_length
SCREAMING_SNAKE_CASE__ = fs_config.mask_channel_prob
SCREAMING_SNAKE_CASE__ = fs_config.mask_length
SCREAMING_SNAKE_CASE__ = fs_config.mask_prob
SCREAMING_SNAKE_CASE__ = 'Wav2Vec2FeatureExtractor'
SCREAMING_SNAKE_CASE__ = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True ) -> List[str]:
'''simple docstring'''
if is_finetuned:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
SCREAMING_SNAKE_CASE__ = SEWConfig.from_pretrained(UpperCamelCase_ )
else:
SCREAMING_SNAKE_CASE__ = convert_config(model[0] , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = model[0].eval()
SCREAMING_SNAKE_CASE__ = True if config.feat_extract_norm == 'layer' else False
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ = Dictionary.load(UpperCamelCase_ )
# Important: change the bos & pad token ids, since the CTC symbol is <pad>
# and not <s> as in fairseq
SCREAMING_SNAKE_CASE__ = target_dict.pad_index
SCREAMING_SNAKE_CASE__ = target_dict.bos_index
SCREAMING_SNAKE_CASE__ = target_dict.pad_index
SCREAMING_SNAKE_CASE__ = target_dict.bos_index
SCREAMING_SNAKE_CASE__ = target_dict.eos_index
SCREAMING_SNAKE_CASE__ = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase_ , 'vocab.json' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCamelCase_ ) )
return
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = WavaVecaCTCTokenizer(
UpperCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor(feature_extractor=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
processor.save_pretrained(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = SEWForCTC(UpperCamelCase_ )
else:
SCREAMING_SNAKE_CASE__ = SEWModel(UpperCamelCase_ )
feature_extractor.save_pretrained(UpperCamelCase_ )
recursively_load_weights(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
hf_model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__snake_case = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
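# Illustrative sketch of the vocab export step above (assumed toy dictionary):
# fairseq target dictionaries are saved as a plain token -> id JSON mapping.
import io
import json
indices = {"<pad>": 0, "<s>": 1, "</s>": 2, "|": 3, "a": 4}
buf = io.StringIO()
json.dump(indices, buf)
assert json.loads(buf.getvalue())["|"] == 3  # "|" is the word delimiter token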
| 472
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_snake_case = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 709
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_snake_case = logging.get_logger("""transformers.models.speecht5""")
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
hf_model.apply_weight_norm()
lowercase__ = checkpoint["input_conv.weight_g"]
lowercase__ = checkpoint["input_conv.weight_v"]
lowercase__ = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
lowercase__ = checkpoint[f'''upsamples.{i}.1.weight_g''']
lowercase__ = checkpoint[f'''upsamples.{i}.1.weight_v''']
lowercase__ = checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowercase__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
lowercase__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
lowercase__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
lowercase__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
lowercase__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
lowercase__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
lowercase__ = checkpoint["output_conv.1.weight_g"]
lowercase__ = checkpoint["output_conv.1.weight_v"]
lowercase__ = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , ):
if config_path is not None:
lowercase__ = SpeechTaHifiGanConfig.from_pretrained(__magic_name__ )
else:
lowercase__ = SpeechTaHifiGanConfig()
lowercase__ = SpeechTaHifiGan(__magic_name__ )
lowercase__ = torch.load(__magic_name__ )
load_weights(orig_checkpoint["model"]["generator"] , __magic_name__ , __magic_name__ )
lowercase__ = np.load(__magic_name__ )
lowercase__ = stats[0].reshape(-1 )
lowercase__ = stats[1].reshape(-1 )
lowercase__ = torch.from_numpy(__magic_name__ ).float()
lowercase__ = torch.from_numpy(__magic_name__ ).float()
model.save_pretrained(__magic_name__ )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(__magic_name__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_snake_case = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 611
| 0
|
UpperCAmelCase = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 666
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : int = ["input_ids", "attention_mask"]
_SCREAMING_SNAKE_CASE : List[str] = RobertaTokenizer
def __init__( self : Optional[int] , A__ : List[Any]=None , A__ : Optional[int]=None , A__ : List[str]=None , A__ : Dict="replace" , A__ : List[str]="<s>" , A__ : Optional[Any]="</s>" , A__ : List[str]="</s>" , A__ : List[Any]="<s>" , A__ : int="<unk>" , A__ : int="<pad>" , A__ : List[Any]="<mask>" , A__ : Any=False , A__ : Optional[int]=True , **A__ : Union[str, Any] , ) -> int:
'''simple docstring'''
super().__init__(
A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , A__ ) != add_prefix_space:
snake_case_ : List[Any] = getattr(A__ , pre_tok_state.pop("type" ) )
snake_case_ : Any = add_prefix_space
snake_case_ : List[Any] = pre_tok_class(**A__ )
snake_case_ : Optional[int] = add_prefix_space
snake_case_ : List[str] = "post_processor"
snake_case_ : Tuple = getattr(self.backend_tokenizer , A__ , A__ )
if tokenizer_component_instance:
snake_case_ : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ : str = tuple(state["sep"] )
if "cls" in state:
snake_case_ : Tuple = tuple(state["cls"] )
snake_case_ : Tuple = False
if state.get("add_prefix_space" , A__ ) != add_prefix_space:
snake_case_ : Optional[Any] = add_prefix_space
snake_case_ : str = True
if state.get("trim_offsets" , A__ ) != trim_offsets:
snake_case_ : Optional[int] = trim_offsets
snake_case_ : List[Any] = True
if changes_to_apply:
snake_case_ : int = getattr(A__ , state.pop("type" ) )
snake_case_ : List[Any] = component_class(**A__ )
setattr(self.backend_tokenizer , A__ , A__ )
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Tuple , A__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
snake_case_ : Any = value
def UpperCAmelCase__ ( self : int , *A__ : Optional[Any] , **A__ : int ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Optional[Any] = kwargs.get("is_split_into_words" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A__ , **A__ )
def UpperCAmelCase__ ( self : Union[str, Any] , *A__ : Any , **A__ : List[Any] ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Optional[int] = kwargs.get("is_split_into_words" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A__ , **A__ )
def UpperCAmelCase__ ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def UpperCAmelCase__ ( self : int , A__ : List[str] , A__ : Union[str, Any]=None ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : str = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
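# Illustrative shape of RoBERTa's pair input built above (assumed toy ids):
# <s> A </s> </s> B </s>, i.e. a doubled separator between the two segments.
bos_id, eos_id = 0, 2
ids_a, ids_b = [10, 11], [20]
pair = [bos_id] + ids_a + [eos_id] + [eos_id] + ids_b + [eos_id]
assert pair == [0, 10, 11, 2, 2, 20, 2]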
| 666
| 1
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def snake_case ( a_ : str , a_ : str , **a_ : int ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(a_ , **a_ )
UpperCamelCase_ : List[Any] = AutoModelForSeqaSeqLM.from_config(a_ )
model.save_pretrained(a_ )
AutoTokenizer.from_pretrained(a_ ).save_pretrained(a_ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 543
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCamelCase ="\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCamelCase ="\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCamelCase ="\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    """simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
    def _compute(self, predictions, references, min_len=1, max_len=4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
}
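# A minimal, self-contained sketch of the GLEU idea from _DESCRIPTION above (our helper
# names, plain Python instead of nltk; the real metric defers to gleu_score.corpus_gleu):
from collections import Counter


def _ngram_counts(tokens, min_len=1, max_len=4):
    # Count every sub-sequence of min_len..max_len tokens.
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts


def sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    # GLEU for one sentence pair: min(precision, recall) over matching n-grams.
    hyp_counts = _ngram_counts(hypothesis, min_len, max_len)
    ref_counts = _ngram_counts(reference, min_len, max_len)
    overlap = sum((hyp_counts & ref_counts).values())  # clipped n-gram matches
    precision = overlap / max(sum(hyp_counts.values()), 1)
    recall = overlap / max(sum(ref_counts.values()), 1)
    return min(precision, recall)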
| 543
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs) -> None:
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 82
|
'''simple docstring'''
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        # Clearing the least significant set bit counts exactly one bit per pass.
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(f"""Benchmark when {number = }:""")
        print(f"""{get_set_bits_count_using_modulo_operator(number) = }""")
        timing = timeit(f'z.get_set_bits_count_using_modulo_operator({number})', setup=setup)
        print(f"""timeit() runs in {timing} seconds""")
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            f'z.get_set_bits_count_using_brian_kernighans_algorithm({number})', setup=setup,
        )
        print(f"""timeit() runs in {timing} seconds""")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
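# A worked trace (ours, not from the source): for number = 0b1011 (decimal 11),
# Kernighan's update `number &= number - 1` clears one set bit per pass:
#   0b1011 -> 0b1010 -> 0b1000 -> 0b0000,
# so the loop runs three times and both functions return 3.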
| 538
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, **kwargs,
        )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt"):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)
        if input_labels is not None:
            input_labels = np.array(input_labels)
        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0)
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords: np.ndarray, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
| 71
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = tempfile.mkdtemp()
# fmt: off
_UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_UpperCamelCase = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_A , _A )
def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : int ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = image_processor(_A , return_tensors='''np''' )
_UpperCamelCase = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = processor(text=_A )
_UpperCamelCase = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_A ):
processor()
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase = processor.batch_decode(_A )
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 71
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_lowercase = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 118
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    '''simple docstring'''

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer) -> None:
        '''simple docstring'''
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ", FutureWarning, )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        '''simple docstring'''
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        '''simple docstring'''
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
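# A minimal usage sketch (the checkpoint name and sampling rate below are illustrative
# assumptions, not from this file):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = processor(audio=raw_audio, sampling_rate=16_000, text="a transcript", return_tensors="pt")
# `batch["labels"]` then carries the tokenized transcript ids, as wired up in __call__ above.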
| 118
| 1
|
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l à</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f"""{token} {vocab_tokens[token]}\n""")
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = 'Tôi là VinAI Research'
        output_text = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
        return input_text, output_text

    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'Tôi là VinAI Research'
        bpe_tokens = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 324
|
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """simple docstring"""
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
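# A quick worked example (figures are ours, not from the source): borrowing 100_000 at a
# 10% annual rate over 2 years gives r = 0.10 / 12 and n = 24 monthly payments, so
# equated_monthly_installments(100_000, 0.10, 2) is roughly 4614.49 per month.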
| 324
| 1
|
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
snake_case_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        """simple docstring"""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires a calib_dataset.')
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset, description='Calibration')
        return DataLoader(
            calib_dataset, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=False, )
def lowercase__ (self : Optional[Any], __UpperCAmelCase : str=None ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_calib_dataloader(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : int = self.model
quant_trainer.configure_model(__UpperCAmelCase, self.quant_trainer_args, calib=__UpperCAmelCase )
model.eval()
quant_trainer.enable_calibration(__UpperCAmelCase )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__UpperCAmelCase ):
# Prediction step
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.prediction_step(__UpperCAmelCase, __UpperCAmelCase, prediction_loss_only=__UpperCAmelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__UpperCAmelCase, self.quant_trainer_args )
SCREAMING_SNAKE_CASE : Tuple = model
def lowercase__ (self : Dict, __UpperCAmelCase : Optional[int]=None, __UpperCAmelCase : Tuple=None, __UpperCAmelCase : List[str]=None, __UpperCAmelCase : str = "eval" ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE : Any = self.get_eval_dataloader(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : Dict = self.compute_metrics
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : Optional[int] = eval_loop(
__UpperCAmelCase, description='''Evaluation''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=__UpperCAmelCase, )
finally:
SCREAMING_SNAKE_CASE : Optional[int] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.post_process_function(__UpperCAmelCase, __UpperCAmelCase, output.predictions )
SCREAMING_SNAKE_CASE : List[str] = self.compute_metrics(__UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : Any = metrics.pop(__UpperCAmelCase )
self.log(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE : List[Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE : Optional[int] = self.callback_handler.on_evaluate(self.args, self.state, self.control, __UpperCAmelCase )
return metrics
def lowercase__ (self : Tuple, __UpperCAmelCase : Optional[int], __UpperCAmelCase : Tuple, __UpperCAmelCase : List[Any]=None, __UpperCAmelCase : str = "test" ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.get_test_dataloader(__UpperCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : Tuple = self.compute_metrics
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : List[Any] = eval_loop(
__UpperCAmelCase, description='''Prediction''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=__UpperCAmelCase, )
finally:
SCREAMING_SNAKE_CASE : List[str] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE : List[str] = self.post_process_function(__UpperCAmelCase, __UpperCAmelCase, output.predictions, '''predict''' )
SCREAMING_SNAKE_CASE : List[Any] = self.compute_metrics(__UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = metrics.pop(__UpperCAmelCase )
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=__UpperCAmelCase )
def lowercase__ (self : Tuple, __UpperCAmelCase : Optional[Any]="./" ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.eval_dataset
SCREAMING_SNAKE_CASE : Optional[int] = self.get_eval_dataloader(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Dict = next(iter(__UpperCAmelCase ) )
# saving device - to make it consistent
SCREAMING_SNAKE_CASE : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(v.to(__UpperCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : str = self.model.to(__UpperCAmelCase )
model.eval()
model.float()
SCREAMING_SNAKE_CASE : Any = model.module if hasattr(__UpperCAmelCase, '''module''' ) else model
quant_trainer.configure_model(__UpperCAmelCase, self.quant_trainer_args )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(__UpperCAmelCase, '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, export_params=__UpperCAmelCase, opset_version=13, do_constant_folding=__UpperCAmelCase, input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''], output_names=['''output_start_logits''', '''output_end_logits'''], dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
}, verbose=__UpperCAmelCase, )
logger.info('''onnx export finished''' )
| 507
|
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, ):
        """simple docstring"""
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ])
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True, ):
        """simple docstring"""
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states)
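# Mixing sketch (restating the forward pass above, ours): with mix_ratio = 0.5 and
# per-branch residuals d0 = t0(x) - x and d1 = t1(x) - x, the output is
# 0.5 * d0 + 0.5 * d1 + x, i.e. an equal blend of both conditioned branches added
# back onto the input hidden states.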
| 507
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
UpperCAmelCase_ : str = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 424
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
UpperCAmelCase_ : List[Any] = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class TapasConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """tapas"""
def __init__( self , lowerCamelCase_=3_0_5_2_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=0 , lowerCamelCase_=10.0 , lowerCamelCase_=0 , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=1.0 , lowerCamelCase_=False , lowerCamelCase_=None , lowerCamelCase_=1.0 , lowerCamelCase_=1.0 , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_="ratio" , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=6_4 , lowerCamelCase_=3_2 , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_a : Optional[Any] = vocab_size
_a : List[str] = hidden_size
_a : Union[str, Any] = num_hidden_layers
_a : Tuple = num_attention_heads
_a : Tuple = hidden_act
_a : Optional[Any] = intermediate_size
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : int = max_position_embeddings
_a : str = type_vocab_sizes
_a : Tuple = initializer_range
_a : int = layer_norm_eps
# Fine-tuning task hyperparameters
_a : Any = positive_label_weight
_a : Optional[int] = num_aggregation_labels
_a : Any = aggregation_loss_weight
_a : str = use_answer_as_supervision
_a : Optional[int] = answer_loss_importance
_a : int = use_normalized_answer_loss
_a : Optional[int] = huber_loss_delta
_a : Optional[int] = temperature
_a : Union[str, Any] = aggregation_temperature
_a : List[str] = use_gumbel_for_cells
_a : Optional[Any] = use_gumbel_for_aggregation
_a : str = average_approximation_function
_a : Tuple = cell_selection_preference
_a : Tuple = answer_loss_cutoff
_a : Optional[int] = max_num_rows
_a : List[Any] = max_num_columns
_a : Any = average_logits_per_cell
_a : str = select_one_column
_a : Any = allow_empty_column_selection
_a : Dict = init_cell_selection_weights_to_zero
_a : List[Any] = reset_position_index_per_cell
_a : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
_a : Dict = aggregation_labels
_a : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels , lowerCamelCase_ ):
_a : str = {int(lowerCamelCase_ ): v for k, v in aggregation_labels.items()}
| 424
| 1
|
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """simple docstring"""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    """simple docstring"""
    print('moving disk from', from_pole, 'to', to_pole)


def main() -> None:
    """simple docstring"""
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
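# A minimal trace (using the names restored above): move_tower(2, 'A', 'B', 'C') prints
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B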
| 220
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    '''simple docstring'''

    feature_extractor_class = 'Speech2TextFeatureExtractor'
    tokenizer_class = 'Speech2TextTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 220
| 1
|
"""simple docstring"""
import numpy as np
lowercase_ = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    '''simple docstring'''

    def __init__(self) -> None:
        '''simple docstring'''
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        '''Return the 1-indexed (row, column) of `letter` in the Polybius square.'''
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        '''Return the letter at 1-indexed (index1, index2) in the Polybius square.'''
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(' ', '')
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
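# A small round-trip sketch (message chosen by us; 'j' and spaces are normalized away
# by encode, so any j-free lowercase word decodes back to itself):
#   cipher = BifidCipher()
#   assert cipher.decode(cipher.encode('testmessage')) == 'testmessage'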
| 215
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    '''simple docstring'''

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    '''simple docstring'''

    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    '''simple docstring'''

    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        '''simple docstring'''
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 215
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    """The prior transformer used in unCLIP-style models to predict CLIP image embeddings from text embeddings."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""Returns a dictionary of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""Sets the attention processor to use, either a single processor or a dict mapping layer names to processors."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and sets the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embeddings: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embeddings = self.embedding_proj_norm(proj_embeddings)

        proj_embeddings = self.embedding_proj(proj_embeddings)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
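# --- Usage sketch (editor's addition, not part of the original file). Runs a
# forward pass through a deliberately small PriorTransformer; the shapes
# follow the unCLIP-style prior convention (noisy image embedding + pooled
# text embedding + per-token text states). All sizes here are assumptions.
#
# prior = PriorTransformer(
#     num_attention_heads=2, attention_head_dim=8, num_layers=2,
#     embedding_dim=16, num_embeddings=4, additional_embeddings=4,
# )
# hidden = torch.randn(1, 16)          # noisy image embedding
# proj = torch.randn(1, 16)            # pooled text embedding
# text_states = torch.randn(1, 4, 16)  # per-token text encoder states
# out = prior(hidden, 1, proj, encoder_hidden_states=text_states)
# print(out.predicted_image_embedding.shape)  # torch.Size([1, 16])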
| 35
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 35
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq MusicGen state dict to HF module names and split out the enc-dec projection."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2_048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
UpperCamelCase : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
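# --- Usage sketch (editor's addition, not part of the original file). An
# example invocation; the script filename and output directory are
# hypothetical:
#   python convert_musicgen_transformers.py \
#       --checkpoint small --pytorch_dump_folder ./musicgen-small
# This downloads the `small` checkpoint via audiocraft, converts the decoder
# weights, and writes HF-format model + processor files to the folder.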
| 707
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
a : str = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
a : Tuple = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
a : Tuple = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
a : Any = name.replace('linear1' , 'fc1' )
if "linear2" in name:
a : str = name.replace('linear2' , 'fc2' )
if "norm1" in name:
a : str = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
a : List[Any] = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
a : str = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
a : Optional[Any] = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
a : List[Any] = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
a : str = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : OrderedDict , snake_case : int ) -> Tuple[Dict, Dict]:
"""simple docstring"""
a : List[Any] = list(state_dict.keys() )
a : List[Any] = {}
for key in keys:
a : str = state_dict.pop(snake_case )
a : Optional[Any] = rename_keys(snake_case )
if "in_proj_weight" in key:
# split fused qkv proj
a : List[str] = val[:hidden_size, :]
a : Any = val[hidden_size : 2 * hidden_size, :]
a : Any = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
a : Optional[Any] = val
else:
a : Any = val
return state_dict, enc_dec_proj_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
a : Tuple = 1_024
a : List[Any] = 24
a : Dict = 16
elif checkpoint == "medium":
a : str = 1_536
a : Dict = 48
a : Optional[int] = 24
elif checkpoint == "large":
a : str = 2_048
a : Optional[Any] = 48
a : Union[str, Any] = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
a : List[str] = MusicgenDecoderConfig(
hidden_size=snake_case , ffn_dim=hidden_size * 4 , num_hidden_layers=snake_case , num_attention_heads=snake_case , )
return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : str=None , snake_case : str=None , snake_case : List[Any]="cpu" ) -> List[str]:
"""simple docstring"""
a : Union[str, Any] = MusicGen.get_pretrained(snake_case , device=snake_case )
a : str = decoder_config_from_checkpoint(snake_case )
    state_dict = fairseq_model.lm.state_dict()
    state_dict, enc_dec_proj_state_dict = rename_state_dict(
        state_dict, hidden_size=decoder_config.hidden_size
    )
a : Dict = TaEncoderModel.from_pretrained('t5-base' )
a : Optional[Any] = EncodecModel.from_pretrained('facebook/encodec_32khz' )
a : int = MusicgenForCausalLM(snake_case ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(state_dict, strict=False)
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(snake_case )
if len(snake_case ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(snake_case ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
a : Tuple = MusicgenForConditionalGeneration(text_encoder=snake_case , audio_encoder=snake_case , decoder=snake_case )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
# check we can do a forward pass
a : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
a : List[str] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
a : List[Any] = model(input_ids=snake_case , decoder_input_ids=snake_case ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
a : Optional[int] = AutoTokenizer.from_pretrained('t5-base' )
a : Optional[Any] = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
a : str = MusicgenProcessor(feature_extractor=snake_case , tokenizer=snake_case )
# set the appropriate bos/pad token ids
a : List[str] = 2_048
a : Union[str, Any] = 2_048
# set other default generation config params
a : Union[str, Any] = int(30 * audio_encoder.config.frame_rate )
a : List[str] = True
a : str = 3.0
if pytorch_dump_folder is not None:
Path(snake_case ).mkdir(exist_ok=snake_case )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(snake_case )
processor.push_to_hub(snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 610
| 0
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""


DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` them with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCamelCase : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
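# --- Usage sketch (editor's addition, not part of the original file). Run
# from the repository root:
#   python utils/check_dummies.py                      # fail if dummy files are stale
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate them
# For a hypothetical lowercase object name, create_dummy_object produces:
#   def fancy_function(*args, **kwargs):
#       requires_backends(fancy_function, ["torch"])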
| 149
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" XLNet tokenizer backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """An XLNet sequence has the format ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create a token-type-id mask for the pair; the trailing ``<cls>`` gets segment id 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
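# --- Usage sketch (editor's addition, not part of the original file). XLNet
# appends its special tokens at the END of the sequence (sep, then cls):
#
# tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# tok.build_inputs_with_special_tokens([10, 11], [20, 21])
# # -> [10, 11, <sep>, 20, 21, <sep>, <cls>]
# tok.create_token_type_ids_from_sequences([10, 11], [20, 21])
# # -> [0, 0, 0, 1, 1, 1, 2]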
| 149
| 1
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes the wrapped function return its run time in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
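# --- Usage sketch (editor's addition, not part of the original file). Writes
# a small dummy dataset to a temporary Arrow file and times the write with
# the decorator above; the feature spec is an illustrative assumption.
#
# import tempfile
#
# features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#
# @get_duration
# def bench(path):
#     generate_example_dataset(path, features, num_examples=100)
#
# with tempfile.TemporaryDirectory() as tmp:
#     print(f"write took {bench(tmp + '/data.arrow'):.3f}s")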
| 711
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the reduced numerator and denominator of x + y + z."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
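# --- Worked mini-example (editor's addition, not part of the original file).
# With x = 1/2 and y = 1/3, the n=1 branch yields z = x + y = 5/6, and
# add_three(1, 2, 1, 3, 5, 6) reduces 1/2 + 1/3 + 5/6 = 60/36 to (5, 3).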
| 182
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
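# --- Usage sketch (editor's addition, not part of the original file).
# Constructs a small config and inspects the sparse-attention settings; the
# sizes are illustrative assumptions.
#
# config = BigBirdConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2, block_size=16)
# print(config.attention_type, config.block_size, config.num_random_blocks)  # block_sparse 16 3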
| 383
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow storage."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes, using the native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for Arrow image storage."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
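# --- Usage sketch (editor's addition, not part of the original file).
# Round-trips a tiny array through the storage format (requires Pillow):
#
# arr = np.zeros((4, 4, 3), dtype=np.uint8)
# encoded = encode_np_array(arr)           # {"path": None, "bytes": b"..."}
# image = Image().decode_example(encoded)  # 4x4 RGB PIL.Image.Image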
| 309
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT model's weights to our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCamelCase_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 716
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCamelCase_ = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCamelCase_ = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCAmelCase_ ( ):
SCREAMING_SNAKE_CASE__ =(
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
SCREAMING_SNAKE_CASE__ =bs[:]
SCREAMING_SNAKE_CASE__ =0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE__ =[chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase, __UpperCamelCase ) )
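# The mapping built above is a bijection from all 256 byte values to printable
# unicode characters, so byte-level BPE never needs an <unk> token. A short
# round-trip sketch (assuming the def above keeps its upstream name
# `bytes_to_unicode`, which is how the class below calls it):
_byte_encoder = bytes_to_unicode()
_byte_decoder = {v: k for k, v in _byte_encoder.items()}
_encoded = "".join(_byte_encoder[b] for b in "hello world".encode("utf-8"))
_decoded = bytearray(_byte_decoder[c] for c in _encoded).decode("utf-8")
assert _decoded == "hello world"  # the space byte is mapped to a printable stand-in ('Ġ')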
def UpperCAmelCase_ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =set()
SCREAMING_SNAKE_CASE__ =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ =char
return pairs
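# Tiny usage sketch of the adjacent-pair extraction above (assuming the
# upstream name `get_pairs`, which the BPE loop in the class below uses):
_pairs = get_pairs(tuple("hello"))
assert _pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}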
class __a ( __lowerCamelCase ):
"""simple docstring"""
_A : List[Any] = VOCAB_FILES_NAMES
_A : Dict = PRETRAINED_VOCAB_FILES_MAP
_A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple ,_UpperCamelCase : List[Any] ,_UpperCamelCase : Any ,_UpperCamelCase : Optional[Any]="replace" ,_UpperCamelCase : Tuple="<s>" ,_UpperCamelCase : Tuple="</s>" ,_UpperCamelCase : str="</s>" ,_UpperCamelCase : str="<s>" ,_UpperCamelCase : int="<unk>" ,_UpperCamelCase : str="<pad>" ,_UpperCamelCase : Tuple="<mask>" ,_UpperCamelCase : Any=False ,**_UpperCamelCase : Any ,) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else bos_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else eos_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else sep_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else cls_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else unk_token
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE__ =AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token
super().__init__(
errors=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,**_UpperCamelCase ,)
with open(_UpperCamelCase ,encoding="""utf-8""" ) as vocab_handle:
SCREAMING_SNAKE_CASE__ =json.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ ={v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ =errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE__ =bytes_to_unicode()
SCREAMING_SNAKE_CASE__ ={v: k for k, v in self.byte_encoder.items()}
with open(_UpperCamelCase ,encoding="""utf-8""" ) as merges_handle:
SCREAMING_SNAKE_CASE__ =merges_handle.read().split("""\n""" )[1:-1]
SCREAMING_SNAKE_CASE__ =[tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE__ =dict(zip(_UpperCamelCase ,range(len(_UpperCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ ={}
SCREAMING_SNAKE_CASE__ =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE__ =re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __A ( self : Dict ) -> str:
'''simple docstring'''
return len(self.encoder )
def __A ( self : Tuple ) -> List[str]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __A ( self : Tuple ,_UpperCamelCase : Any ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ =tuple(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =get_pairs(_UpperCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ =min(_UpperCamelCase ,key=lambda _UpperCamelCase : self.bpe_ranks.get(_UpperCamelCase ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =bigram
SCREAMING_SNAKE_CASE__ =[]
SCREAMING_SNAKE_CASE__ =0
while i < len(_UpperCamelCase ):
try:
SCREAMING_SNAKE_CASE__ =word.index(_UpperCamelCase ,_UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ =j
if word[i] == first and i < len(_UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ =tuple(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =new_word
if len(_UpperCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ =get_pairs(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =""" """.join(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =word
return word
def __A ( self : int ,_UpperCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[]
for token in re.findall(self.pat ,_UpperCamelCase ):
SCREAMING_SNAKE_CASE__ ="""""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCamelCase ).split(""" """ ) )
return bpe_tokens
def __A ( self : List[str] ,_UpperCamelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(_UpperCamelCase ,self.encoder.get(self.unk_token ) )
def __A ( self : int ,_UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
return self.decoder.get(_UpperCamelCase )
def __A ( self : Dict ,_UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ="""""".join(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def __A ( self : Optional[Any] ,_UpperCamelCase : str ,_UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ =os.path.join(
_UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ =os.path.join(
_UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_UpperCamelCase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_UpperCamelCase ,ensure_ascii=_UpperCamelCase ) + """\n""" )
SCREAMING_SNAKE_CASE__ =0
with open(_UpperCamelCase ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _UpperCamelCase : _UpperCamelCase[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
SCREAMING_SNAKE_CASE__ =token_index
writer.write(""" """.join(_UpperCamelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def __A ( self : str ,_UpperCamelCase : List[int] ,_UpperCamelCase : Optional[List[int]] = None ,_UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase ,token_ids_a=_UpperCamelCase ,already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1]
def __A ( self : Any ,_UpperCamelCase : List[int] ,_UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[self.sep_token_id]
SCREAMING_SNAKE_CASE__ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self : Dict ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Tuple=False ,**_UpperCamelCase : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCamelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE__ =""" """ + text
return (text, kwargs)
def __A ( self : List[Any] ,_UpperCamelCase : List[int] ,_UpperCamelCase : Optional[List[int]] = None ) -> Optional[Any]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def __A ( self : int ,_UpperCamelCase : "Conversation" ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(""" """ + text )
else:
# Generated responses already contain the space prefix.
inputs.append(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =""" """.join(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =self.encode(_UpperCamelCase )
if len(_UpperCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE__ =input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
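# To make the merge loop in the `bpe` method above concrete, here is a
# self-contained toy run under an assumed two-rule merge table (the ranks and
# the input token are illustrative, not the tokenizer's real vocabulary):
_toy_ranks = {("l", "l"): 0, ("ll", "o"): 1}  # lower rank merges first

def _toy_bpe(token):
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: _toy_ranks.get(pair, float("inf")))
        if bigram not in _toy_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

assert _toy_bpe("hello") == "h e llo"  # ("l","l") merges first, then ("ll","o")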
| 588
| 0
|
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def snake_case ( A__ ):
return input_array.reshape((input_array.size, 1) )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = np.nan
for i in range(A__ ):
UpperCAmelCase_ : List[Any] = features[:, labels == i]
UpperCAmelCase_ : List[Any] = data.mean(1 )
# Centralize the data of class i
UpperCAmelCase_ : List[str] = data - column_reshape(A__ )
if i > 0:
# covariance_sum was already initialized on a previous iteration
covariance_sum += np.dot(A__ ,centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase_ : Tuple = np.dot(A__ ,centered_data.T )
return covariance_sum / features.shape[1]
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = features.mean(1 )
UpperCAmelCase_ : Any = np.nan
for i in range(A__ ):
UpperCAmelCase_ : Optional[Any] = features[:, labels == i]
UpperCAmelCase_ : List[Any] = data.shape[1]
UpperCAmelCase_ : int = data.mean(1 )
if i > 0:
# covariance_sum was already initialized on a previous iteration
covariance_sum += device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) ,(column_reshape(A__ ) - column_reshape(A__ )).T ,)
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase_ : int = device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) ,(column_reshape(A__ ) - column_reshape(A__ )).T ,)
return covariance_sum / features.shape[1]
def snake_case ( A__ ,A__ ):
# Check if the features have been loaded
if features.any():
UpperCAmelCase_ : Optional[Any] = features.mean(1 )
# Center the dataset
UpperCAmelCase_ : Optional[Any] = features - np.reshape(A__ ,(data_mean.size, 1) )
UpperCAmelCase_ : Union[str, Any] = np.dot(A__ ,centered_data.T ) / features.shape[1]
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = np.linalg.eigh(A__ )
# Take the columns in reverse order (largest eigenvalues first) and keep only the first `dimensions` columns
UpperCAmelCase_ : List[str] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCAmelCase_ : List[Any] = np.dot(filtered_eigenvectors.T ,A__ )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR ,format="%(message)s" ,force=A__ )
logging.error("Dataset empty" )
raise AssertionError
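# A compact, self-contained restatement of the projection performed above,
# using the same column-as-sample convention (synthetic data; names prefixed
# `toy_` are illustrative):
toy_rng = np.random.default_rng(0)
toy_features = toy_rng.normal(size=(3, 10))              # 3 features, 10 samples
toy_centered = toy_features - toy_features.mean(1, keepdims=True)
toy_cov = toy_centered @ toy_centered.T / toy_features.shape[1]
_, toy_eigenvectors = np.linalg.eigh(toy_cov)
toy_top = toy_eigenvectors[:, ::-1][:, :2]               # two largest-eigenvalue directions
toy_projected = toy_top.T @ toy_centered
assert toy_projected.shape == (2, 10)                    # reduced to 2 components, samples preserved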
def snake_case ( A__ ,A__ ,A__ ,A__ ):
assert classes > dimensions
# Check if the features have already been loaded
if features.any():
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = eigh(
covariance_between_classes(A__ ,A__ ,A__ ) ,covariance_within_classes(A__ ,A__ ,A__ ) ,)
UpperCAmelCase_ : Union[str, Any] = eigenvectors[:, ::-1][:, :dimensions]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = np.linalg.svd(A__ )
UpperCAmelCase_ : Optional[int] = svd_matrix[:, 0:dimensions]
UpperCAmelCase_ : int = np.dot(filtered_svd_matrix.T ,A__ )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR ,format="%(message)s" ,force=A__ )
logging.error("Dataset empty" )
raise AssertionError
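# The call to scipy's `eigh` above solves the generalized eigenproblem
# S_b v = lambda * S_w v. A toy sketch with assumed diagonal scatter matrices:
toy_s_b = np.array([[2.0, 0.0], [0.0, 1.0]])  # assumed between-class scatter
toy_s_w = np.eye(2)                           # assumed within-class scatter
_, toy_vecs = eigh(toy_s_b, toy_s_w)
toy_direction = toy_vecs[:, ::-1][:, 0]       # direction maximizing the scatter ratio
assert np.allclose(np.abs(toy_direction), [1.0, 0.0])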
def snake_case ( ):
# Create dummy dataset with 2 classes and 3 features
UpperCAmelCase_ : Optional[Any] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCAmelCase_ : str = np.array([0, 0, 0, 1, 1] )
UpperCAmelCase_ : Dict = 2
UpperCAmelCase_ : List[str] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(A__ ) as error_info:
UpperCAmelCase_ : Tuple = linear_discriminant_analysis(
A__ ,A__ ,A__ ,A__ )
if isinstance(A__ ,np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def snake_case ( ):
UpperCAmelCase_ : Tuple = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCAmelCase_ : str = 2
UpperCAmelCase_ : Union[str, Any] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(A__ ) as error_info:
UpperCAmelCase_ : Dict = principal_component_analysis(A__ ,A__ )
if not np.allclose(A__ ,A__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowerCamelCase_ = None
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowerCamelCase_ = {
'''camembert-base''': 512,
}
lowerCamelCase_ = '''▁'''
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ['''input_ids''', '''attention_mask''']
__magic_name__ = CamembertTokenizer
def __init__( self : Tuple , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : Dict="</s>" , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : Union[str, Any]="<s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : Union[str, Any]="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : Any=["<s>NOTUSED", "</s>NOTUSED"] , **lowerCAmelCase_ : Any , ) -> Union[str, Any]:
# Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ : Dict = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : Optional[Any] = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Tuple = [self.cls_token_id]
UpperCAmelCase_ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Dict = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
return (out_vocab_file,)
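# The two helpers above implement RoBERTa-style formatting: `<s> A </s>` for a
# single sequence and `<s> A </s></s> B </s>` for a pair, with all-zero token
# type ids. A toy check of that layout (the ids are placeholders):
_cls_id, _sep_id = 5, 6
_ids_a, _ids_b = [10, 11], [12]
_pair = [_cls_id] + _ids_a + [_sep_id, _sep_id] + _ids_b + [_sep_id]
_token_type_ids = [0] * len(_pair)  # CamemBERT does not use token type ids
assert _pair == [5, 10, 11, 6, 6, 12, 6] and sum(_token_type_ids) == 0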
| 95
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class snake_case_ :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=6 , UpperCamelCase=17 , UpperCamelCase=23 , UpperCamelCase=11 , UpperCamelCase=True , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = act_dim
lowerCamelCase__ = state_dim
lowerCamelCase__ = hidden_size
lowerCamelCase__ = max_length
lowerCamelCase__ = is_training
def __UpperCAmelCase ( self):
lowerCamelCase__ = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
lowerCamelCase__ = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
lowerCamelCase__ = floats_tensor((self.batch_size, self.seq_length, 1))
lowerCamelCase__ = floats_tensor((self.batch_size, self.seq_length, 1))
lowerCamelCase__ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00)
lowerCamelCase__ = random_attention_mask((self.batch_size, self.seq_length))
lowerCamelCase__ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __UpperCAmelCase ( self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
lowerCamelCase__ = DecisionTransformerModel(config=UpperCamelCase)
model.to(UpperCamelCase)
model.eval()
lowerCamelCase__ = model(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase)
self.parent.assertEqual(result.state_preds.shape , states.shape)
self.parent.assertEqual(result.action_preds.shape , actions.shape)
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size)) # seq_length * 3, as there are 3 modalities: states, returns and actions
def __UpperCAmelCase ( self):
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class snake_case_ ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] =(DecisionTransformerModel,) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] =()
__lowerCAmelCase : Optional[int] ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
__lowerCAmelCase : Any =False
# Ignore failing tests from ModelTesterMixin, as the model does not implement these features
__lowerCAmelCase : Any =False
__lowerCAmelCase : int =False
__lowerCAmelCase : Optional[Any] =False
__lowerCAmelCase : Optional[int] =False
__lowerCAmelCase : Union[str, Any] =False
__lowerCAmelCase : Dict =False
__lowerCAmelCase : List[str] =False
__lowerCAmelCase : Union[str, Any] =False
__lowerCAmelCase : str =False
def __UpperCAmelCase ( self):
lowerCamelCase__ = DecisionTransformerModelTester(self)
lowerCamelCase__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37)
def __UpperCAmelCase ( self):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase)
@slow
def __UpperCAmelCase ( self):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = DecisionTransformerModel.from_pretrained(UpperCamelCase)
self.assertIsNotNone(UpperCamelCase)
def __UpperCAmelCase ( self):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(UpperCamelCase)
lowerCamelCase__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(UpperCamelCase)] , UpperCamelCase)
@require_torch
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self):
lowerCamelCase__ = 2 # number of steps of autoregressive prediction we will perform
lowerCamelCase__ = 10 # defined by the RL environment, may be normalized
lowerCamelCase__ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
lowerCamelCase__ = model.to(UpperCamelCase)
lowerCamelCase__ = model.config
torch.manual_seed(0)
lowerCamelCase__ = torch.randn(1 , 1 , config.state_dim).to(device=UpperCamelCase , dtype=torch.floataa) # env.reset()
lowerCamelCase__ = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=UpperCamelCase)
lowerCamelCase__ = torch.tensor(UpperCamelCase , device=UpperCamelCase , dtype=torch.floataa).reshape(1 , 1 , 1)
lowerCamelCase__ = state
lowerCamelCase__ = torch.zeros(1 , 0 , config.act_dim , device=UpperCamelCase , dtype=torch.floataa)
lowerCamelCase__ = torch.zeros(1 , 0 , device=UpperCamelCase , dtype=torch.floataa)
lowerCamelCase__ = torch.tensor(0 , device=UpperCamelCase , dtype=torch.long).reshape(1 , 1)
for step in range(UpperCamelCase):
lowerCamelCase__ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCamelCase)] , dim=1)
lowerCamelCase__ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCamelCase)] , dim=1)
lowerCamelCase__ = torch.ones(1 , states.shape[1]).to(dtype=torch.long , device=states.device)
with torch.no_grad():
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model(
states=UpperCamelCase , actions=UpperCamelCase , rewards=UpperCamelCase , returns_to_go=UpperCamelCase , timesteps=UpperCamelCase , attention_mask=UpperCamelCase , return_dict=UpperCamelCase , )
self.assertEqual(action_pred.shape , actions.shape)
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4))
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim).to(device=UpperCamelCase , dtype=torch.floataa),
1.0,
False,
{},
)
lowerCamelCase__ = action_pred[0, -1]
lowerCamelCase__ = torch.cat([states, state] , dim=1)
lowerCamelCase__ = returns_to_go[0, -1] - reward
lowerCamelCase__ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1)] , dim=1)
lowerCamelCase__ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCamelCase , dtype=torch.long) * (step + 1)] , dim=1)
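# The integration test above threads `returns_to_go` through each step by
# subtracting the observed reward from the previous target. A stripped-down
# sketch of that bookkeeping (scalar toy values, no model involved):
_toy_target_return = 10.0
_toy_rewards = [1.0, 1.0]  # assumed per-step rewards
_toy_returns_to_go = [_toy_target_return]
for _toy_reward in _toy_rewards:
    _toy_returns_to_go.append(_toy_returns_to_go[-1] - _toy_reward)
assert _toy_returns_to_go == [10.0, 9.0, 8.0]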
| 717
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
set_seed(7_7_0)
lowerCAmelCase_ = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
lowerCAmelCase_ = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
lowerCAmelCase_ = os.path.dirname(os.path.abspath(__file__))
lowerCAmelCase_ = os.path.join(os.path.expanduser("~"), ".cache")
lowerCAmelCase_ = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def lowerCAmelCase( a__ : Dict , a__ : Union[str, Any]=False ):
'''simple docstring'''
lowerCamelCase__ = model_type
if use_small:
key += "_small"
return os.path.join(a__ , REMOTE_MODEL_PATHS[key]["file_name"] )
def lowerCAmelCase( a__ : Optional[Any] , a__ : Union[str, Any] ):
'''simple docstring'''
os.makedirs(a__ , exist_ok=a__ )
hf_hub_download(repo_id=a__ , filename=a__ , local_dir=a__ )
def lowerCAmelCase( a__ : List[Any] , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : str="text" ):
'''simple docstring'''
if model_type == "text":
lowerCamelCase__ = BarkSemanticModel
lowerCamelCase__ = BarkSemanticConfig
lowerCamelCase__ = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCamelCase__ = BarkCoarseModel
lowerCamelCase__ = BarkCoarseConfig
lowerCamelCase__ = BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCamelCase__ = BarkFineModel
lowerCamelCase__ = BarkFineConfig
lowerCamelCase__ = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowerCamelCase__ = f"""{model_type}_small""" if use_small else model_type
lowerCamelCase__ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(a__ ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
lowerCamelCase__ = torch.load(a__ , map_location=a__ )
# this is a hack
lowerCamelCase__ = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
lowerCamelCase__ = model_args["vocab_size"]
lowerCamelCase__ = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCamelCase__ = model_args.pop("n_head" )
lowerCamelCase__ = model_args.pop("n_embd" )
lowerCamelCase__ = model_args.pop("n_layer" )
lowerCamelCase__ = ConfigClass(**checkpoint["model_args"] )
lowerCamelCase__ = ModelClass(config=a__ )
lowerCamelCase__ = GenerationConfigClass()
lowerCamelCase__ = model_generation_config
lowerCamelCase__ = checkpoint["model"]
# fixup checkpoint
lowerCamelCase__ = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(a__ ):
# replace part of the key with corresponding layer name in HF implementation
lowerCamelCase__ = k[len(a__ ) :]
for old_layer_name in new_layer_name_dict:
lowerCamelCase__ = new_k.replace(a__ , new_layer_name_dict[old_layer_name] )
lowerCamelCase__ = state_dict.pop(a__ )
lowerCamelCase__ = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCamelCase__ = {k for k in extra_keys if not k.endswith(".attn.bias" )}
lowerCamelCase__ = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCamelCase__ = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(a__ ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(a__ ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(a__ , strict=a__ )
lowerCamelCase__ = model.num_parameters(exclude_embeddings=a__ )
lowerCamelCase__ = checkpoint["best_val_loss"].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(a__ , 3 )} loss""" )
model.eval()
model.to(a__ )
del checkpoint, state_dict
return model
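# Toy illustration of the checkpoint-key fixup performed above: strip the
# torch.compile prefix, then apply the substring renames (a subset of the real
# mapping defined at the top of this script):
_toy_renames = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}
_toy_state_dict = {"_orig_mod.transformer.h.0.attn.c_attn.weight": 0}
_toy_fixed = {}
for _toy_key, _toy_val in _toy_state_dict.items():
    if _toy_key.startswith("_orig_mod."):
        _toy_key = _toy_key[len("_orig_mod.") :]
    for _toy_old, _toy_new in _toy_renames.items():
        _toy_key = _toy_key.replace(_toy_old, _toy_new)
    _toy_fixed[_toy_key] = _toy_val
assert list(_toy_fixed) == ["layers.0.attn.att_proj.weight"]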
def lowerCAmelCase( a__ : Tuple , a__ : List[Any]=False , a__ : Optional[Any]="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCamelCase__ = "cpu" # do conversion on cpu
lowerCamelCase__ = _get_ckpt_path(a__ , use_small=a__ )
lowerCamelCase__ = _load_model(a__ , a__ , model_type=a__ , use_small=a__ )
# load bark initial model
lowerCamelCase__ = _bark_load_model(a__ , "cpu" , model_type=a__ , use_small=a__ )
if model_type == "text":
lowerCamelCase__ = bark_model["model"]
if model.num_parameters(exclude_embeddings=a__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
lowerCamelCase__ = 5
lowerCamelCase__ = 10
if model_type in ["text", "coarse"]:
lowerCamelCase__ = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowerCamelCase__ = bark_model(a__ )[0]
lowerCamelCase__ = model(a__ )
# take last logits
lowerCamelCase__ = output_new_model_total.logits[:, [-1], :]
else:
lowerCamelCase__ = 3
lowerCamelCase__ = 8
lowerCamelCase__ = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowerCamelCase__ = model(a__ , a__ )
lowerCamelCase__ = bark_model(a__ , a__ )
lowerCamelCase__ = output_new_model_total.logits
# any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
def lowerCAmelCase( a__ : Dict , a__ : int , a__ : List[str] , a__ : Tuple , a__ : int , a__ : Tuple , ):
'''simple docstring'''
lowerCamelCase__ = os.path.join(a__ , a__ )
lowerCamelCase__ = BarkSemanticConfig.from_pretrained(os.path.join(a__ , "config.json" ) )
lowerCamelCase__ = BarkCoarseConfig.from_pretrained(os.path.join(a__ , "config.json" ) )
lowerCamelCase__ = BarkFineConfig.from_pretrained(os.path.join(a__ , "config.json" ) )
lowerCamelCase__ = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
lowerCamelCase__ = BarkSemanticModel.from_pretrained(a__ )
lowerCamelCase__ = BarkCoarseModel.from_pretrained(a__ )
lowerCamelCase__ = BarkFineModel.from_pretrained(a__ )
lowerCamelCase__ = EncodecModel.from_pretrained("facebook/encodec_24khz" )
lowerCamelCase__ = BarkConfig.from_sub_model_configs(
a__ , a__ , a__ , a__ )
lowerCamelCase__ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowerCamelCase__ = BarkModel(a__ )
lowerCamelCase__ = semantic
lowerCamelCase__ = coarseAcoustic
lowerCamelCase__ = fineAcoustic
lowerCamelCase__ = codec
lowerCamelCase__ = bark_generation_config
Path(a__ ).mkdir(exist_ok=a__ )
bark.save_pretrained(a__ , repo_id=a__ , push_to_hub=a__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
lowerCAmelCase_ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 426
| 0
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
lowercase_ : Optional[int] = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
lowercase_ : Optional[int] = field(
default=lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowercase_ : Optional[int] = field(
default=lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
lowercase_ : Optional[int] = field(
default=lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class _lowerCAmelCase :
lowercase_ : str = field(
default=lowerCamelCase , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowercase_ : str = field(
default=lowerCamelCase , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
lowercase_ : Optional[str] = field(
default=lowerCamelCase , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
lowercase_ : Optional[str] = field(
default=lowerCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowercase_ : Optional[str] = field(
default=lowerCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowercase_ : Optional[str] = field(
default=lowerCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowercase_ : Optional[bool] = field(
default=lowerCamelCase , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
lowercase_ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowercase_ : bool = field(
default=lowerCamelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_UpperCAmelCase = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
_UpperCAmelCase = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = train_dataset.features["label"].names
if training_args.do_eval:
_UpperCAmelCase = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = eval_dataset.features["label"].names
if training_args.do_predict:
_UpperCAmelCase = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = predict_dataset.features["label"].names
# Labels
_UpperCAmelCase = len(UpperCamelCase__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , idalabel={str(UpperCamelCase__ ): label for i, label in enumerate(UpperCamelCase__ )} , labelaid={label: i for i, label in enumerate(UpperCamelCase__ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase = False
def preprocess_function(UpperCamelCase__ ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=UpperCamelCase__ , max_length=data_args.max_seq_length , truncation=UpperCamelCase__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
_UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_UpperCAmelCase = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(UpperCamelCase__ ) ) , 3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
_UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_UpperCAmelCase = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_predict_samples )
_UpperCAmelCase = predict_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
_UpperCAmelCase = predict_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
_UpperCAmelCase = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase__ ):
_UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , UpperCamelCase__ ) else p.predictions
_UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
return metric.compute(predictions=UpperCamelCase__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase = default_data_collator
elif training_args.fpaa:
_UpperCAmelCase = DataCollatorWithPadding(UpperCamelCase__ , pad_to_multiple_of=8 )
else:
_UpperCAmelCase = None
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCamelCase__ , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
_UpperCAmelCase = train_result.metrics
_UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , UpperCamelCase__ )
trainer.save_metrics("train" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase = trainer.evaluate(eval_dataset=UpperCamelCase__ )
_UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
_UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("eval" , UpperCamelCase__ )
trainer.save_metrics("eval" , UpperCamelCase__ )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = trainer.predict(UpperCamelCase__ , metric_key_prefix="predict" )
_UpperCAmelCase = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCamelCase__ )
)
_UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("predict" , UpperCamelCase__ )
trainer.save_metrics("predict" , UpperCamelCase__ )
_UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
_UpperCAmelCase = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(UpperCamelCase__ ):
_UpperCAmelCase = label_list[item]
writer.write(f"{index}\t{item}\n" )
if __name__ == "__main__":
main()
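# The `compute_metrics` closure above reduces logits to class ids with an
# argmax before scoring. A tiny self-contained sketch of that reduction
# (synthetic logits and labels):
import numpy as _np_sketch

_toy_logits = _np_sketch.array([[0.1, 0.7, 0.2], [0.9, 0.05, 0.05]])  # batch of 2, 3 classes
_toy_preds = _np_sketch.argmax(_toy_logits, axis=1)
_toy_labels = _np_sketch.array([1, 0])
assert _toy_preds.tolist() == [1, 0] and float((_toy_preds == _toy_labels).mean()) == 1.0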
| 657
|
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase = f"Input value of [number={number}] must be an integer"
raise TypeError(UpperCamelCase__ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
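# The check above accepts exactly the automorphic numbers: those whose square
# ends in the number itself. The same digit-by-digit comparison under a
# readable (assumed) name, with a quick exhaustive check below 100:
def is_automorphic(number: int) -> bool:
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert [n for n in range(100) if is_automorphic(n)] == [0, 1, 5, 6, 25, 76]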
| 657
| 1
|
"""simple docstring"""
def lowercase ( __snake_case : list ):
if not grid or not grid[0]:
raise TypeError('''The grid must be a non-empty list of non-empty rows''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowercase_ : int = grid[0]
for row_n in range(1 , len(__snake_case ) ):
lowercase_ : Union[str, Any] = grid[row_n]
lowercase_ : str = fill_row(__snake_case , __snake_case )
lowercase_ : int = grid[row_n]
return grid[-1][-1]
def lowercase ( __snake_case : list , __snake_case : list ):
current_row[0] += row_above[0]
for cell_n in range(1 , len(__snake_case ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
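# A worked, self-contained equivalent of the DP above under an assumed readable
# name; each cell ends up holding the cheapest cost of reaching it while moving
# only right or down:
def _toy_min_path_sum(grid):
    for c in range(1, len(grid[0])):
        grid[0][c] += grid[0][c - 1]
    for r in range(1, len(grid)):
        grid[r][0] += grid[r - 1][0]
        for c in range(1, len(grid[0])):
            grid[r][c] += min(grid[r][c - 1], grid[r - 1][c])
    return grid[-1][-1]

assert _toy_min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1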
if __name__ == "__main__":
import doctest
doctest.testmod()
| 141
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( _A ):
def __init__( self : Optional[int] , A : VQModel , A : UNetaDModel , A : DDIMScheduler ) -> Union[str, Any]:
super().__init__()
self.register_modules(vqvae=A , unet=A , scheduler=A )
@torch.no_grad()
def __call__( self : List[Any] , A : int = 1 , A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A : float = 0.0 , A : int = 50 , A : Optional[str] = "pil" , A : bool = True , **A : Optional[int] , ) -> Union[Tuple, ImagePipelineOutput]:
lowercase_ : Optional[Any] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=A , )
lowercase_ : List[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(A )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowercase_ : Dict = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase_ : Dict = {}
if accepts_eta:
lowercase_ : int = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowercase_ : Optional[Any] = self.scheduler.scale_model_input(A , A )
# predict the noise residual
lowercase_ : int = self.unet(A , A ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : List[Any] = self.scheduler.step(A , A , A , **A ).prev_sample
# decode the image latents with the VAE
lowercase_ : int = self.vqvae.decode(A ).sample
lowercase_ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowercase_ : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
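# Hedged usage sketch for the unconditional pipeline above. The checkpoint id
# is an assumption; any LDM checkpoint bundling a VQ-VAE, UNet, and DDIM
# scheduler would match the __call__ signature defined above:
from diffusers import DiffusionPipeline

_pipe = DiffusionPipeline.from_pretrained("CompVis/ldm-celebahq-256")  # assumed id
_gen = torch.Generator().manual_seed(0)
_image = _pipe(batch_size=1, num_inference_steps=20, generator=_gen).images[0]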
| 141
| 1
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : Any = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a [`DetrConfig`] from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
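# --- Hedged usage sketch (not part of the original file) ---
# Builds a randomly initialized DETR model from the config above; assumes
# DetrConfig/DetrModel are importable from transformers:
#
#     from transformers import DetrConfig, DetrModel
#
#     config = DetrConfig(num_queries=50)
#     model = DetrModel(config)
#     print(config.num_attention_heads)  # resolved via attribute_map -> encoder_attention_heads (8)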
| 595
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
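# --- Hedged usage sketch (not part of the original file) ---
# Mirrors the integration test above with real tokenization instead of
# hard-coded ids; assumes network access to the "albert-base-v2" checkpoint:
#
#     from transformers import AlbertTokenizer, FlaxAlbertModel
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     model = FlaxAlbertModel.from_pretrained("albert-base-v2")
#     inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
#     outputs = model(**inputs)
#     print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)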
| 595
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
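    # --- Hedged usage sketch (not part of the original file) ---
    # The call pattern the tests above exercise; the three returned tensors
    # correspond to Jukebox's three prior levels. Assumes network access to
    # the "openai/jukebox-1b-lyrics" checkpoint:
    #
    #     from transformers import JukeboxTokenizer
    #
    #     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    #     tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
    #     print([t.shape for t in tokens])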
| 709
|
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
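# --- Hedged sanity check (not part of the original file) ---
# Quick comparison of the two implementations; `same_permutations` is a
# hypothetical helper, not part of the module above:
#
#     def same_permutations(a, b):
#         return sorted(map(tuple, a)) == sorted(map(tuple, b))
#
#     assert same_permutations(permute([1, 2, 3]), permute2([1, 2, 3]))
#     print(permute([1, 2]))  # [[2, 1], [1, 2]]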
| 519
| 0
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
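# --- Hedged usage sketch (not part of the original file) ---
# What the lazy structure above enables: importing from the package resolves
# the real module only on first attribute access. Assumes torch is installed
# so the modeling objects are registered:
#
#     from transformers import CpmAntConfig, CpmAntModel
#
#     config = CpmAntConfig()
#     model = CpmAntModel(config)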
| 654
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
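# --- Hedged usage sketch (not part of the original file) ---
# The flow the staging tests above exercise, run against the production Hub;
# "my-user/my-image-processor" and the token are placeholders:
#
#     from transformers import ViTImageProcessor
#
#     image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
#     image_processor.push_to_hub("my-user/my-image-processor", use_auth_token="hf_...")
#     reloaded = ViTImageProcessor.from_pretrained("my-user/my-image-processor")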
| 475
| 0
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Creates a beta schedule that discretizes the given alpha_t_bar function.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
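    # --- Hedged usage sketch (not part of the original file) ---
    # Minimal denoising loop for the scheduler above; `model` stands in for
    # any epsilon-predicting UNet and is a placeholder, not a real checkpoint:
    #
    #     scheduler = UnCLIPScheduler(num_train_timesteps=1000, prediction_type="epsilon")
    #     scheduler.set_timesteps(25)
    #     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
    #     for t in scheduler.timesteps:
    #         noise_pred = model(sample, t)  # placeholder model call
    #         sample = scheduler.step(noise_pred, t, sample).prev_sample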
| 638
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(pool, logits_list)

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")

        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
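# --- Hedged usage sketch (not part of the original file) ---
# Condensed version of the slow integration test above; assumes pyctcdecode is
# installed, the checkpoint is reachable, and `speech` is a 16 kHz float array:
#
#     import torch
#     from transformers import AutoProcessor, Wav2Vec2ForCTC
#
#     processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
#     model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
#     inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(processor.batch_decode(logits.numpy()).text)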
| 638
| 1
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
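# --- Hedged round-trip example (not part of the original file) ---
# Programmatic use without the interactive prompts; the 2x2 key below has
# determinant 7, which is coprime to 36 as check_determinant requires:
#
#     key = numpy.array([[2, 5], [1, 6]])
#     hc = HillCipher(key)
#     cipher_text = hc.encrypt("Testing Hill Cipher")
#     print(cipher_text)
#     print(hc.decrypt(cipher_text))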
| 85
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def snake_case__ ( self, snake_case__, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = ChannelDimension.FIRST, ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
lowercase_ : Optional[int] = to_numpy_array(snake_case__ )
if do_resize:
lowercase_ : Dict = self.resize(image=snake_case__, size=snake_case__, resample=snake_case__ )
if do_center_crop:
lowercase_ : Optional[int] = self.center_crop(snake_case__, size=snake_case__ )
if do_rescale:
lowercase_ : Dict = self.rescale(image=snake_case__, scale=snake_case__, offset=snake_case__ )
if do_normalize:
lowercase_ : Optional[Any] = self.normalize(image=snake_case__, mean=snake_case__, std=snake_case__ )
lowercase_ : Tuple = to_channel_dimension_format(snake_case__, snake_case__ )
return image
def snake_case__ ( self, snake_case__, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = ChannelDimension.FIRST, **snake_case__, ) -> PIL.Image.Image:
"""simple docstring"""
lowercase_ : Any = do_resize if do_resize is not None else self.do_resize
lowercase_ : str = resample if resample is not None else self.resample
lowercase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : List[Any] = offset if offset is not None else self.offset
lowercase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Any = image_mean if image_mean is not None else self.image_mean
lowercase_ : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase_ : Optional[Any] = size if size is not None else self.size
lowercase_ : Dict = get_size_dict(snake_case__, default_to_square=snake_case__ )
lowercase_ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ : Tuple = get_size_dict(snake_case__, param_name="""crop_size""" )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
lowercase_ : Optional[Any] = make_batched(snake_case__ )
lowercase_ : Optional[Any] = [
[
self._preprocess_image(
image=snake_case__, do_resize=snake_case__, size=snake_case__, resample=snake_case__, do_center_crop=snake_case__, crop_size=snake_case__, do_rescale=snake_case__, rescale_factor=snake_case__, offset=snake_case__, do_normalize=snake_case__, image_mean=snake_case__, image_std=snake_case__, data_format=snake_case__, )
for img in video
]
for video in videos
]
lowercase_ : List[str] = {"""pixel_values""": videos}
return BatchFeature(data=snake_case__, tensor_type=snake_case__ )
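# Minimal usage sketch (added for illustration, not part of the original module):
# running this file directly preprocesses a fake two-frame video with the
# defaults above (shortest_edge=256 resize, then a 224x224 center crop).
if __name__ == "__main__":
    demo_frames = [np.random.randint(0, 256, (260, 320, 3), dtype=np.uint8) for _ in range(2)]
    demo_processor = VivitImageProcessor()
    demo_batch = demo_processor(demo_frames, return_tensors="np")
    print(demo_batch["pixel_values"].shape)  # expected: (1, 2, 3, 224, 224)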
| 458
| 0
|
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
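

# Hedged cross-check (added for illustration): a brute force with exact
# fractions should agree with solution() for small limits, e.g.
# _brute_force(8) == 2, since 2/5 is the largest fraction below 3/7 with
# denominator at most 8.
def _brute_force(limit: int = 8) -> int:
    from fractions import Fraction

    target = Fraction(3, 7)
    best = Fraction(0, 1)
    for d in range(1, limit + 1):
        n = d * 3 // 7
        if Fraction(n, d) == target:
            n -= 1
        if best < Fraction(n, d) < target:
            best = Fraction(n, d)
    return best.numerator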
| 717
|
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """
    Distinct ways to climb a staircase taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
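

# Aside (illustrative, assumption-flagged): climb_stairs(n) equals the
# (n+1)-th Fibonacci number, since step n is reached from step n-1 or n-2.
# Binet's closed form gives a quick cross-check for small n:
def _fibonacci_closed_form(n: int) -> int:
    from math import sqrt as _sqrt

    phi = (1 + _sqrt(5)) / 2
    return round(phi ** (n + 1) / _sqrt(5))  # e.g. n=4 -> 5 == climb_stairs(4)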
| 551
| 0
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
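# Rough size sanity check (sketch added here; assumes 2 bytes per parameter
# after .half(), ignoring tokenizer/config overhead):
print(f"approx fp16 weight size: {tiny_model.num_parameters() * 2 / 1e6:.2f} MB")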
| 595
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase : int = text_generator("This is a test" , do_sample=lowercase )
self.assertEqual(
lowercase , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
UpperCAmelCase : List[Any] = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
lowercase , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
UpperCAmelCase : Any = text_generator("This is a test" , do_sample=lowercase , num_return_sequences=2 , return_tensors=lowercase )
self.assertEqual(
lowercase , [
{"generated_token_ids": ANY(lowercase )},
{"generated_token_ids": ANY(lowercase )},
] , )
UpperCAmelCase : Dict = text_generator.model.config.eos_token_id
UpperCAmelCase : List[str] = "<pad>"
UpperCAmelCase : List[str] = text_generator(
["This is a test", "This is a second test"] , do_sample=lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase , )
self.assertEqual(
lowercase , [
[
{"generated_token_ids": ANY(lowercase )},
{"generated_token_ids": ANY(lowercase )},
],
[
{"generated_token_ids": ANY(lowercase )},
{"generated_token_ids": ANY(lowercase )},
],
] , )
@require_tf
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase : Union[str, Any] = text_generator("This is a test" , do_sample=lowercase )
self.assertEqual(
lowercase , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
UpperCAmelCase : List[str] = text_generator(["This is a test", "This is a second test"] , do_sample=lowercase )
self.assertEqual(
lowercase , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 595
| 1
|
'''simple docstring'''
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
__lowerCamelCase : Optional[Any] = int(input("Enter the base: ").strip())
__lowerCamelCase : Optional[Any] = int(input("Enter the exponent: ").strip())
__lowerCamelCase : str = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
__lowerCamelCase : Optional[Any] = 1 / result
print(f"{base} to the power of {exponent} is {result}")
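

# Alternative sketch (illustrative): fold the negative-exponent fix into the
# recursion itself instead of patching the result afterwards.
def power_signed(base: int, exponent: int) -> float:
    if exponent < 0:
        return 1 / power_signed(base, -exponent)
    return base * power_signed(base, exponent - 1) if exponent else 1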
| 459
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : str = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
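

# Usage sketch (added for illustration; the task string is an assumption):
if __name__ == "__main__":
    demo_config = Data2VecTextConfig()
    demo_onnx = Data2VecTextOnnxConfig(demo_config, task="sequence-classification")
    # Expect input_ids / attention_mask, each with dynamic batch and sequence axes.
    print(demo_onnx.inputs)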
| 459
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32_000, hidden_size=2_560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10_000, max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31_996, eos_token_id=31_999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
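

# Quick sketch (assumption-flagged: this mirrors how the model derives its
# feed-forward width from the config, i.e. hidden_size * intermediate_multiple_size,
# so 2560 * 4 = 10240 with the defaults above):
if __name__ == "__main__":
    demo_config = GPTNeoXJapaneseConfig()
    print(demo_config.hidden_size * demo_config.intermediate_multiple_size)  # 10240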
| 491
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__a = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 374
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_tokenization(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 584
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"{solution() = }")
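

# Hedged cross-check (added for illustration): enumerate laminae directly for
# a small tile budget and count the same quantity as solution() above.
def _brute_force(t_limit: int = 100, n_limit: int = 10) -> int:
    counts: dict[int, int] = {}
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):  # same parity as outer
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break
            counts[tiles] = counts.get(tiles, 0) + 1
    return sum(1 for c in counts.values() if 1 <= c <= n_limit)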
| 584
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')

    bd_start_time = time.time()
    bidir_a_star = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
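
    # Hedged aside (illustrative): with HEURISTIC = 0 the heuristic is the
    # Euclidean distance, which is admissible on this 4-connected grid but
    # weaker than Manhattan. For the start node it evaluates to sqrt(6**2 + 6**2):
    demo_node = Node(init[1], init[0], goal[1], goal[0], 0, None)
    print(f'''euclidean h from start = {demo_node.h_cost:.3f}''')  # ~8.485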
| 428
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__SCREAMING_SNAKE_CASE : Any ={
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] =['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] =[
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 428
| 1
|
import math
def proth(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError(f"Input value of [number={number}] must be > 0")
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
    value = 0
try:
        value = proth(number)
except ValueError:
print(F"ValueError: there is no {number}th Proth number")
continue
print(F"The {number}th Proth number: {value}")
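

# Hedged aside: Proth numbers are exactly k * 2**n + 1 with k odd and k < 2**n,
# so small values can be cross-checked by direct enumeration:
def _proth_by_definition(count: int) -> list[int]:
    values = {k * 2**n + 1 for n in range(1, 12) for k in range(1, 2**n, 2)}
    return sorted(values)[:count]  # _proth_by_definition(5) -> [3, 5, 9, 13, 17]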
| 710
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( _a , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] =UnCLIPImageVariationPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] =IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
_SCREAMING_SNAKE_CASE : Union[str, Any] =IMAGE_VARIATION_BATCH_PARAMS
_SCREAMING_SNAKE_CASE : int =[
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
_SCREAMING_SNAKE_CASE : Optional[int] =False
@property
def a__ ( self ):
return 32
@property
def a__ ( self ):
return 32
@property
def a__ ( self ):
return self.time_input_dim
@property
def a__ ( self ):
return self.time_input_dim * 4
@property
def a__ ( self ):
return 100
@property
def a__ ( self ):
_A= CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a__ ( self ):
torch.manual_seed(0 )
_A= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCAmelCase__ )
@property
def a__ ( self ):
torch.manual_seed(0 )
_A= CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(lowerCAmelCase__ )
@property
def a__ ( self ):
torch.manual_seed(0 )
_A= {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
_A= UnCLIPTextProjModel(**lowerCAmelCase__ )
return model
@property
def a__ ( self ):
torch.manual_seed(0 )
_A= {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
_A= UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def a__ ( self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def a__ ( self ):
torch.manual_seed(0 )
_A= UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def a__ ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
_A= UNetaDModel(**self.dummy_super_res_kwargs )
return model
def a__ ( self ):
_A= self.dummy_decoder
_A= self.dummy_text_proj
_A= self.dummy_text_encoder
_A= self.dummy_tokenizer
_A= self.dummy_super_res_first
_A= self.dummy_super_res_last
_A= UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1000 , )
_A= UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1000 , )
_A= CLIPImageProcessor(crop_size=32 , size=32 )
_A= self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def a__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 , lowerCAmelCase__=True ):
_A= floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith('mps' ):
_A= torch.manual_seed(lowerCAmelCase__ )
else:
_A= torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
if pil_image:
_A= input_image * 0.5 + 0.5
_A= input_image.clamp(0 , 1 )
_A= input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_A= DiffusionPipeline.numpy_to_pil(lowerCAmelCase__ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def a__ ( self ):
_A= 'cpu'
_A= self.get_dummy_components()
_A= self.pipeline_class(**lowerCAmelCase__ )
_A= pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_A= self.get_dummy_inputs(lowerCAmelCase__ , pil_image=lowerCAmelCase__ )
_A= pipe(**lowerCAmelCase__ )
_A= output.images
_A= self.get_dummy_inputs(lowerCAmelCase__ , pil_image=lowerCAmelCase__ )
_A= pipe(
**lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
_A= image[0, -3:, -3:, -1]
_A= image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A= np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self ):
_A= 'cpu'
_A= self.get_dummy_components()
_A= self.pipeline_class(**lowerCAmelCase__ )
_A= pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_A= self.get_dummy_inputs(lowerCAmelCase__ , pil_image=lowerCAmelCase__ )
_A= pipe(**lowerCAmelCase__ )
_A= output.images
_A= self.get_dummy_inputs(lowerCAmelCase__ , pil_image=lowerCAmelCase__ )
_A= pipe(
**lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
_A= image[0, -3:, -3:, -1]
_A= image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A= np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self ):
_A= 'cpu'
_A= self.get_dummy_components()
_A= self.pipeline_class(**lowerCAmelCase__ )
_A= pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_A= self.get_dummy_inputs(lowerCAmelCase__ , pil_image=lowerCAmelCase__ )
_A= [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
_A= pipe(**lowerCAmelCase__ )
_A= output.images
_A= self.get_dummy_inputs(lowerCAmelCase__ , pil_image=lowerCAmelCase__ )
_A= [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
_A= pipe(
**lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
_A= image[0, -3:, -3:, -1]
_A= image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_A= np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self ):
_A= torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
_A= self.get_dummy_components()
_A= self.pipeline_class(**lowerCAmelCase__ )
_A= pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_A= torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_A= pipe.decoder.dtype
_A= 1
_A= (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_A= pipe.prepare_latents(
lowerCAmelCase__ , dtype=lowerCAmelCase__ , device=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , scheduler=DummyScheduler() )
_A= (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_A= pipe.prepare_latents(
lowerCAmelCase__ , dtype=lowerCAmelCase__ , device=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , scheduler=DummyScheduler() )
_A= self.get_dummy_inputs(lowerCAmelCase__ , pil_image=lowerCAmelCase__ )
_A= pipe(
**lowerCAmelCase__ , decoder_latents=lowerCAmelCase__ , super_res_latents=lowerCAmelCase__ ).images
_A= self.get_dummy_inputs(lowerCAmelCase__ , pil_image=lowerCAmelCase__ )
# Don't pass image, instead pass embedding
_A= pipeline_inputs.pop('image' )
_A= pipe.image_encoder(lowerCAmelCase__ ).image_embeds
_A= pipe(
**lowerCAmelCase__ , decoder_latents=lowerCAmelCase__ , super_res_latents=lowerCAmelCase__ , image_embeddings=lowerCAmelCase__ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def a__ ( self ):
_A= torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
_A= 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCAmelCase__ , expected_max_diff=lowerCAmelCase__ )
@skip_mps
def a__ ( self ):
_A= torch_device == 'cpu'
_A= True
_A= [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , additional_params_copy_to_batched_inputs=lowerCAmelCase__ , )
def a__ ( self ):
_A= [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_A= [2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowerCAmelCase__ , additional_params_copy_to_batched_inputs=lowerCAmelCase__ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowerCAmelCase__ )
@skip_mps
def a__ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a__ ( self ):
return super().test_save_load_local()
@skip_mps
def a__ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
_A= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
_A= load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
_A= UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
_A= pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
_A= torch.Generator(device='cpu' ).manual_seed(0 )
_A= pipeline(
lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type='np' , )
_A= output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ , 15 )
| 476
| 0
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """simple docstring"""
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """simple docstring"""
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
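# Each pair above maps a Hugging Face CvT parameter name (left) to the matching
# key in the original Microsoft checkpoint (right); the conversion function
# below walks these pairs to copy tensors across.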
def cls_token ( idx ):
    """simple docstring"""
    token = []
    token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
    return token
def final ( ):
    """simple docstring"""
    head = []
    head.append(("layernorm.weight", "norm.weight") )
    head.append(("layernorm.bias", "norm.bias") )
    head.append(("classifier.weight", "head.weight") )
    head.append(("classifier.bias", "head.bias") )
    return head
def convert_cvt_checkpoint ( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    """simple docstring"""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="dataset" ) ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu" ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the CvT checkpoint (.pth) file to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
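# A minimal invocation sketch (assuming this file is saved as
# convert_cvt_checkpoint.py; the paths below are illustrative):
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-converted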
| 496
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase__ = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
lowerCAmelCase__ = {'''facebook/blenderbot-3B''': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode ( ):
    """simple docstring"""
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs ( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
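# Illustrative example of the helper above: for the symbol tuple
# ("h", "e", "l", "l", "o"), get_pairs returns the bigram set
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, which the BPE loop in the
# tokenizer below repeatedly merges by rank.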
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any]="replace" , SCREAMING_SNAKE_CASE : str="<s>" , SCREAMING_SNAKE_CASE : Dict="</s>" , SCREAMING_SNAKE_CASE : str="</s>" , SCREAMING_SNAKE_CASE : str="<s>" , SCREAMING_SNAKE_CASE : List[str]="<unk>" , SCREAMING_SNAKE_CASE : Union[str, Any]="<pad>" , SCREAMING_SNAKE_CASE : Optional[int]="<mask>" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , **SCREAMING_SNAKE_CASE : Tuple , ):
lowercase__ : List[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else bos_token
lowercase__ : List[str] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else eos_token
lowercase__ : Dict = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else sep_token
lowercase__ : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else cls_token
lowercase__ : Tuple = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else unk_token
lowercase__ : int = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : Tuple = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as vocab_handle:
lowercase__ : List[Any] = json.load(SCREAMING_SNAKE_CASE )
lowercase__ : str = {v: k for k, v in self.encoder.items()}
lowercase__ : str = errors # how to handle errors in decoding
lowercase__ : List[Any] = bytes_to_unicode()
lowercase__ : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
lowercase__ : str = merges_handle.read().split("\n" )[1:-1]
lowercase__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__ : List[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : List[Any] = {}
lowercase__ : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ : List[str] = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case ( self : int ):
return len(self.encoder )
def snake_case ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
    def snake_case ( self : Union[str, Any] , token : Any ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def snake_case ( self : str , text : List[Any] ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
def snake_case ( self : int , SCREAMING_SNAKE_CASE : List[Any] ):
return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
return self.decoder.get(SCREAMING_SNAKE_CASE )
    def snake_case ( self : List[str] , tokens : Tuple ):
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : int = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
lowercase__ : List[str] = 0
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
lowercase__ : int = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
index += 1
return vocab_file, merge_file
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
lowercase__ : List[Any] = [self.sep_token_id]
lowercase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def snake_case ( self : Dict , text : int , is_split_into_words : Dict=False , **kwargs : Optional[Any] ):
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
    def snake_case ( self : Tuple , conversation : "Conversation" ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = " ".join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
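# A minimal usage sketch (assuming this class is exported under its public
# Blenderbot name; the checkpoint is the one referenced in the vocab maps above):
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   input_ids = tokenizer(" Hello world")["input_ids"]  # note the space prefix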
| 496
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A : List[Any] = logging.get_logger(__name__)
__A : int = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:Optional[int] = 'deta'
SCREAMING_SNAKE_CASE:Tuple = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _a=None , _a=900 , _a=2048 , _a=6 , _a=2048 , _a=8 , _a=6 , _a=1024 , _a=8 , _a=0.0 , _a=True , _a="relu" , _a=256 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=1.0 , _a=True , _a=False , _a="sine" , _a=5 , _a=4 , _a=4 , _a=True , _a=300 , _a=True , _a=True , _a=1 , _a=5 , _a=2 , _a=1 , _a=1 , _a=5 , _a=2 , _a=0.1 , _a=0.25 , **_a , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
a__ = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(_a , _a ):
a__ = backbone_config.pop('model_type' )
a__ = CONFIG_MAPPING[backbone_model_type]
a__ = config_class.from_dict(_a )
a__ = backbone_config
a__ = num_queries
a__ = max_position_embeddings
a__ = d_model
a__ = encoder_ffn_dim
a__ = encoder_layers
a__ = encoder_attention_heads
a__ = decoder_ffn_dim
a__ = decoder_layers
a__ = decoder_attention_heads
a__ = dropout
a__ = attention_dropout
a__ = activation_dropout
a__ = activation_function
a__ = init_std
a__ = init_xavier_std
a__ = encoder_layerdrop
a__ = auxiliary_loss
a__ = position_embedding_type
# deformable attributes
a__ = num_feature_levels
a__ = encoder_n_points
a__ = decoder_n_points
a__ = two_stage
a__ = two_stage_num_proposals
a__ = with_box_refine
a__ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
a__ = class_cost
a__ = bbox_cost
a__ = giou_cost
# Loss coefficients
a__ = mask_loss_coefficient
a__ = dice_loss_coefficient
a__ = bbox_loss_coefficient
a__ = giou_loss_coefficient
a__ = eos_coefficient
a__ = focal_alpha
super().__init__(is_encoder_decoder=_a , **_a )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.d_model
def lowercase__ ( self ):
"""simple docstring"""
a__ = copy.deepcopy(self.__dict__ )
a__ = self.backbone_config.to_dict()
a__ = self.__class__.model_type
return output
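# A minimal usage sketch (names follow this file's definitions; the values are
# illustrative): the __init__ above enforces that two-stage mode requires box
# refinement.
#   config = _UpperCamelCase(two_stage=True, with_box_refine=True, num_queries=300)
#   assert config.num_attention_heads == config.encoder_attention_heads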
| 708
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__A : Any = logging.get_logger(__name__)
def lowerCAmelCase_ ( a : List[Any] , a : List[Any] , a : Optional[Any] ):
a__ = UniSpeechSatForSequenceClassification.from_pretrained(a , config=a )
a__ = downstream_dict['projector.weight']
a__ = downstream_dict['projector.bias']
a__ = downstream_dict['model.post_net.linear.weight']
a__ = downstream_dict['model.post_net.linear.bias']
return model
def lowerCAmelCase_ ( a : Any , a : List[Any] , a : List[Any] ):
a__ = UniSpeechSatForAudioFrameClassification.from_pretrained(a , config=a )
a__ = downstream_dict['model.linear.weight']
a__ = downstream_dict['model.linear.bias']
return model
def lowerCAmelCase_ ( a : Optional[Any] , a : Dict , a : Optional[int] ):
a__ = UniSpeechSatForXVector.from_pretrained(a , config=a )
a__ = downstream_dict['connector.weight']
a__ = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a__ = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
a__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
a__ = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
a__ = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
a__ = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
a__ = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
a__ = downstream_dict['objective.W']
return model
@torch.no_grad()
def lowerCAmelCase_ ( a : Any , a : List[str] , a : List[str] , a : int ):
a__ = torch.load(a , map_location='cpu' )
a__ = checkpoint['Downstream']
a__ = UniSpeechSatConfig.from_pretrained(a )
a__ = WavaVecaFeatureExtractor.from_pretrained(
a , return_attention_mask=a , do_normalize=a )
a__ = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
a__ = convert_classification(a , a , a )
elif arch.endswith('ForAudioFrameClassification' ):
a__ = convert_diarization(a , a , a )
elif arch.endswith('ForXVector' ):
a__ = convert_xvector(a , a , a )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
a__ = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(a )
hf_model.save_pretrained(a )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__A : Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
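# A minimal invocation sketch (assuming this file is saved as
# convert_unispeech_sat_s3prl_checkpoint.py; paths and the model name are illustrative):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model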
| 126
| 0
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
a__ = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , _a , _a , _a = None , _a = None ) -> List[Any]:
_a : Union[str, Any] = None
_a : Optional[Any] = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
_a : Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(_a ):
if item not in EXCLUDE_EXAMPLES:
_a : Any = os.path.join(_a , _a )
if os.path.isfile(_a ) and ".py" in item_path:
with self.subTest(
tested_script=_a , feature_script=_a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
_a : Optional[int] = compare_against_test(
os.path.join(_a , _a ) , _a , _a , _a )
_a : Union[str, Any] = '''\n'''.join(_a )
if special_strings is not None:
for string in special_strings:
_a : Union[str, Any] = diff.replace(_a , '''''' )
self.assertEqual(_a , '''''' )
def __lowercase ( self ) -> Optional[Any]:
self.one_complete_example('''complete_nlp_example.py''' , _a )
self.one_complete_example('''complete_nlp_example.py''' , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[int] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
_a : int = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , _a , _a , _a )
self.one_complete_example('''complete_cv_example.py''' , _a , _a , _a )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = False
@classmethod
def __lowercase ( cls ) -> List[Any]:
super().setUpClass()
_a : str = tempfile.mkdtemp()
_a : str = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_a : int = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __lowercase ( cls ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
_a : List[str] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def __lowercase ( self ) -> Any:
_a : Dict = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
_a : str = run_command(self._launch_args + testargs , return_stdout=_a )
self.assertNotIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
_a : Optional[int] = run_command(self._launch_args + testargs , return_stdout=_a )
if torch.cuda.is_available():
_a : List[Any] = torch.cuda.device_count()
else:
_a : Tuple = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
else:
self.assertIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
@slow
def __lowercase ( self ) -> Union[str, Any]:
_a : List[str] = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
_a : Tuple = run_command(self._launch_args + testargs , return_stdout=_a )
_a : int = re.findall('''({.+})''' , _a )
_a : int = [r for r in results if '''accuracy''' in r][-1]
_a : Optional[Any] = ast.literal_eval(_a )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def __lowercase ( self ) -> str:
_a : Optional[int] = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
_a : str = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_a , '''tracking''' ) ) )
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
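# A sketch of how such a suite is typically run (the file name is illustrative):
#   python -m pytest -sv tests/test_examples.py
# The TESTING_MOCKED_DATALOADERS patch above keeps the example scripts fast by
# substituting the tiny MRPC test samples for the real dataloaders.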
| 14
|
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : Optional[Any] = "▁"
_a : str = {"vocab_file": "prophetnet.tokenizer"}
_a : Any = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
_a : List[Any] = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
_a : int = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def _lowercase ( vocab_file ) -> Optional[Any]:
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class __A (__magic_name__ ):
snake_case :Tuple = VOCAB_FILES_NAMES
snake_case :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :str = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="[SEP]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[UNK]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
__UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__UpperCAmelCase : Union[str, Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
__UpperCAmelCase : List[str] = f"""[unused{i}]"""
__UpperCAmelCase : Any = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__UpperCAmelCase : Any = 12
__UpperCAmelCase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCamelCase_ )
def __getstate__( self ):
__UpperCAmelCase : Dict = self.__dict__.copy()
__UpperCAmelCase : Tuple = None
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Dict = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return ([0] * len(UpperCamelCase_ )) + [1]
return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ):
return len(self.sp_model ) + self.fairseq_offset
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Dict = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Dict = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__UpperCAmelCase : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
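# Illustrative check of the offset bookkeeping above: with fairseq_offset = 12,
# an spm piece id p maps to embedding id p + 12, so spm id 3 ("," in the
# alignment table in __init__) lands at position 15, as the comment there notes.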
| 168
| 0
|
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ):
A_ : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
A_ : Dict = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
A_ : str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
A_ : List[Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
A_ : Optional[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
A_ : Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
A_ : List[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
A_ : Any = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
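# The reshapes above fold the (heads, head_dim) axes of each attention kernel
# into a single matrix so the result can be loaded into a flat torch Linear
# weight; transposition into torch's (out, in) layout happens at the call sites.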
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
if split_mlp_wi:
A_ : str = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
A_ : Tuple = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
A_ : Union[str, Any] = (wi_a, wi_a)
else:
A_ : Dict = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
A_ : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __UpperCamelCase ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ):
A_ : Any = traverse_util.flatten_dict(variables["""target"""] )
    A_ : Optional[int] = {"""/""".join(k ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
A_ : str = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , snake_case__ )
A_ : int = collections.OrderedDict()
# Shared embeddings.
A_ : Optional[Any] = old["""token_embedder/embedding"""]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
A_ : Optional[Any] = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" )
A_ , A_ , A_ , A_ : Optional[int] = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" )
A_ : Any = layer_norm
A_ : Optional[Any] = k.T
A_ : Any = o.T
A_ : str = q.T
A_ : List[Any] = v.T
# Block i, layer 1 (MLP).
A_ : Dict = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" )
A_ , A_ : int = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ )
A_ : Optional[Any] = layer_norm
if split_mlp_wi:
A_ : List[str] = wi[0].T
A_ : str = wi[1].T
else:
A_ : Dict = wi.T
A_ : Dict = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A_ : Optional[int] = tax_relpos_bias_lookup(
snake_case__ , snake_case__ , """encoder""" ).T
A_ : Optional[int] = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
A_ : List[str] = tax_relpos_bias_lookup(
snake_case__ , 0 , """encoder""" ).T
A_ : str = tax_relpos_bias_lookup(
snake_case__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
A_ : List[str] = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" )
A_ , A_ , A_ , A_ : Dict = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" )
A_ : List[str] = layer_norm
A_ : Dict = k.T
A_ : int = o.T
A_ : Any = q.T
A_ : List[Any] = v.T
# Block i, layer 1 (Cross Attention).
A_ : List[str] = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" )
A_ , A_ , A_ , A_ : int = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" )
A_ : Tuple = layer_norm
A_ : int = k.T
A_ : Optional[int] = o.T
A_ : List[Any] = q.T
A_ : str = v.T
# Block i, layer 2 (MLP).
A_ : List[Any] = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" )
A_ , A_ : Dict = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ )
A_ : int = layer_norm
if split_mlp_wi:
A_ : List[str] = wi[0].T
A_ : Tuple = wi[1].T
else:
A_ : int = wi.T
A_ : Optional[int] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
A_ : Union[str, Any] = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T
A_ : List[Any] = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
A_ : Union[str, Any] = old["""decoder/logits_dense/kernel"""].T
return new
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
A_ : List[str] = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
A_ : Any = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
A_ : str = state_dict["""shared.weight"""]
return state_dict
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
A_ : Any = checkpoints.load_tax_checkpoint(snake_case__ )
A_ : Union[str, Any] = convert_tax_to_pytorch(
snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ )
A_ : int = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ):
A_ : Any = MTaConfig.from_json_file(snake_case__ )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
A_ : Optional[Any] = UMTaEncoderModel(snake_case__ )
else:
A_ : Optional[Any] = UMTaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print("""Done""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
_lowerCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
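# A minimal invocation sketch (assuming this file is saved as
# convert_t5x_checkpoint_to_pytorch.py; paths are illustrative):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file ./config.json \
#       --pytorch_dump_path ./converted \
#       --scalable_attention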
| 480
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( snake_case__ , snake_case__=False ):
A_ : Dict = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=False ):
for i in range(config.num_hidden_layers ):
if base_model:
A_ : Any = """"""
else:
A_ : List[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
A_ : Union[str, Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A_ : Dict = in_proj_weight[
: config.hidden_size, :
]
A_ : int = in_proj_bias[: config.hidden_size]
A_ : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
A_ : Optional[Any] = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( snake_case__ ):
A_ : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : Union[str, Any] = dct.pop(snake_case__ )
A_ : List[Any] = val
def __UpperCamelCase ( ):
A_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : str = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=False ):
A_ : str = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=snake_case__ , )
A_ : int = ViTHybridConfig(backbone_config=snake_case__ , image_size=384 , num_labels=1_000 )
A_ : Any = False
# load original model from timm
A_ : str = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : Optional[Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(snake_case__ )
A_ : Dict = create_rename_keys(snake_case__ , snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , snake_case__ )
A_ : Any = """huggingface/label-files"""
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[int] = {int(snake_case__ ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Dict = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ : int = ViTHybridModel(snake_case__ ).eval()
else:
A_ : Union[str, Any] = ViTHybridForImageClassification(snake_case__ ).eval()
model.load_state_dict(snake_case__ )
# create image processor
A_ : Any = create_transform(**resolve_data_config({} , model=snake_case__ ) )
A_ : List[Any] = transform.transforms
A_ : int = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
A_ : Optional[int] = ViTHybridImageProcessor(
do_resize=snake_case__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=snake_case__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A_ : Tuple = prepare_img()
A_ : List[str] = transform(snake_case__ ).unsqueeze(0 )
A_ : int = processor(snake_case__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case__ , snake_case__ )
# verify logits
with torch.no_grad():
A_ : List[str] = model(snake_case__ )
A_ : Any = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
A_ : int = timm_model.forward_features(snake_case__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(snake_case__ , outputs.pooler_output , atol=1E-3 )
else:
A_ : Optional[int] = timm_model(snake_case__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(F"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(F"""ybelkada/{vit_name}""" )
processor.push_to_hub(F"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
_lowerCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
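# A minimal invocation sketch (assuming this file is saved as
# convert_vit_hybrid_timm_to_pytorch.py; the dump path is illustrative):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384 \
#       --push_to_hub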
| 480
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__snake_case :Optional[int] =logging.get_logger(__name__)
__snake_case :Optional[Any] ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case :Optional[Any] ={
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__snake_case :Tuple ={
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__snake_case :List[Any] ={
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__snake_case :Dict ={
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__snake_case :List[Any] ={
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__snake_case :Dict ={
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__snake_case :str ={
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__snake_case :Optional[int] ={
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__snake_case :Any ={
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : str = VOCAB_FILES_NAMES
A_ : Dict = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A_ : int = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
A_ : Any = DPRContextEncoderTokenizer
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : Tuple = VOCAB_FILES_NAMES
A_ : Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A_ : Dict = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A_ : str = DPRQuestionEncoderTokenizer
__snake_case :Tuple =collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__snake_case :Dict =collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__snake_case :Optional[int] =r'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Return:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(_lowerCamelCase )
class lowerCAmelCase__ :
def __call__( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Union[bool, str] = False , __UpperCamelCase : Union[bool, str] = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Optional[bool] = None , **__UpperCamelCase : str , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
A = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
A = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
A = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
A = len(__UpperCamelCase )
A = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
assert len(__UpperCamelCase ) == len(
__UpperCamelCase ), f'''There should be as many titles as texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts.'''
A = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
A = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
A = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
A = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
A = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def __UpperCamelCase ( self : List[Any] , __UpperCamelCase : BatchEncoding , __UpperCamelCase : DPRReaderOutput , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 64 , __UpperCamelCase : int = 4 , ) -> List[DPRSpanPrediction]:
A = reader_input['input_ids']
A , A , A = reader_output[:3]
A = len(__UpperCamelCase )
A = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
A = []
for doc_id in sorted_docs:
A = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
A = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A = sequence_ids.index(self.pad_token_id )
else:
A = len(__UpperCamelCase )
A = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : List[int] , __UpperCamelCase : int , __UpperCamelCase : int , ) -> List[DPRSpanPrediction]:
A = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
A = sorted(__UpperCamelCase , key=lambda x : x[1] , reverse=__UpperCamelCase )
A = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
A = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowerCamelCase )
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = VOCAB_FILES_NAMES
A_ : str = READER_PRETRAINED_VOCAB_FILES_MAP
A_ : List[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Dict = READER_PRETRAINED_INIT_CONFIGURATION
A_ : List[str] = ['input_ids', 'attention_mask']
A_ : List[Any] = DPRReaderTokenizer
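# Minimal usage sketch of the reader tokenizer defined above. It assumes the
# public `DPRReader`/`DPRReaderTokenizer` classes from `transformers` and the
# "facebook/dpr-reader-single-nq-base" checkpoint referenced in the URL maps:
# from transformers import DPRReader, DPRReaderTokenizer
# tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded_inputs = tokenizer(
#     questions=["What is love?"],
#     titles=["Haddaway"],
#     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#     return_tensors="pt",
# )
# outputs = model(**encoded_inputs)
# best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
# print(best_spans[0].text)  # highest-scoring answer span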
| 106
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
_SCREAMING_SNAKE_CASE : Optional[int] = True
except (ImportError, ModuleNotFoundError):
_SCREAMING_SNAKE_CASE : Optional[Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
re.sub("<n>" , "" , snake_case ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(snake_case ) )
| 400
| 0
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Optional[int] = DebertaTokenizer
A : List[Any] = True
A : Tuple = DebertaTokenizerFast
def snake_case__ ( self : str ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
__snake_case : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__snake_case : List[str] = {"""unk_token""": """[UNK]"""}
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def snake_case__ ( self : List[Any] , **_lowerCAmelCase : Tuple ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] ):
__snake_case : Tuple = """lower newer"""
__snake_case : Tuple = """lower newer"""
return input_text, output_text
def snake_case__ ( self : Optional[Any] ):
__snake_case : Tuple = self.get_tokenizer()
__snake_case : Dict = """lower newer"""
__snake_case : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__snake_case : Optional[Any] = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Dict = tokens + [tokenizer.unk_token]
__snake_case : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
def snake_case__ ( self : List[Any] ):
__snake_case : Any = self.get_tokenizer()
__snake_case : Any = tokenizer("""Hello""" , """World""" )
__snake_case : Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , _lowerCAmelCase )
@slow
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Dict = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__snake_case : int = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
__snake_case : Tuple = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
__snake_case : List[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__snake_case : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__snake_case : str = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case__ ( self : str ):
__snake_case : str = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__snake_case : int = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__snake_case : Any = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
__snake_case : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase )
__snake_case : Union[str, Any] = [tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) for seq in encoding["""input_ids"""]]
# fmt: off
__snake_case : List[str] = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__snake_case : Tuple = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , _lowerCAmelCase )
for expected, decoded in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
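# Hedged usage sketch of the tokenizer under test (standard transformers API;
# fetching the checkpoint requires network access):
# from transformers import DebertaTokenizer
# tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
# encoding = tokenizer("lower newer")
# print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))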
| 712
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = "camembert"
def __init__( self : List[Any] , _lowerCAmelCase : Any=3_05_22 , _lowerCAmelCase : str=7_68 , _lowerCAmelCase : Union[str, Any]=12 , _lowerCAmelCase : Any=12 , _lowerCAmelCase : Optional[Any]=30_72 , _lowerCAmelCase : Optional[int]="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Any=5_12 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Optional[Any]=1e-12 , _lowerCAmelCase : Tuple=1 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : List[str]="absolute" , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Tuple=None , **_lowerCAmelCase : Tuple , ):
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : Optional[int] = hidden_act
__snake_case : Optional[int] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : Union[str, Any] = type_vocab_size
__snake_case : List[str] = initializer_range
__snake_case : Dict = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[Any] = use_cache
__snake_case : str = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
def snake_case__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
__snake_case : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case : Any = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
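# Hedged illustration of what the property above resolves to for a
# non-multiple-choice task (mirroring transformers' CamembertOnnxConfig):
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])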
| 390
| 0
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__a: List[Any] = logging.get_logger(__name__)
class UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
@add_end_docstrings(a__ )
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "generated"
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> str:
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _lowerCAmelCase( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ) -> List[str]:
lowercase__ : Any = {}
if truncation is not None:
lowercase__ : Optional[int] = truncation
lowercase__ : Union[str, Any] = generate_kwargs
lowercase__ : Optional[Any] = {}
if return_tensors is not None and return_type is None:
lowercase__ : Any = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase__ : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ : Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ : Any = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
if len(__lowerCAmelCase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowercase__ : Union[str, Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
return True
def _lowerCAmelCase( self , *__lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
lowercase__ : str = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , __lowerCAmelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowercase__ : Any = ([prefix + arg for arg in args[0]],)
lowercase__ : Dict = True
elif isinstance(args[0] , __lowerCAmelCase ):
lowercase__ : Tuple = (prefix + args[0],)
lowercase__ : List[str] = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowercase__ : List[Any] = self.tokenizer(*__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> Dict:
lowercase__ : Optional[Any] = super().__call__(*__lowerCAmelCase , **__lowerCAmelCase )
if (
isinstance(args[0] , __lowerCAmelCase )
and all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for el in args[0] )
and all(len(__lowerCAmelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , **__lowerCAmelCase ) -> Optional[int]:
lowercase__ : Union[str, Any] = self._parse_and_tokenize(__lowerCAmelCase , truncation=__lowerCAmelCase , **__lowerCAmelCase )
return inputs
def _lowerCAmelCase( self , __lowerCAmelCase , **__lowerCAmelCase ) -> Tuple:
if self.framework == "pt":
lowercase__ , lowercase__ : int = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowercase__ , lowercase__ : Tuple = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowercase__ : Optional[int] = generate_kwargs.get('''min_length''' , self.model.config.min_length )
lowercase__ : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(__lowerCAmelCase , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
lowercase__ : List[str] = self.model.generate(**__lowerCAmelCase , **__lowerCAmelCase )
lowercase__ : Union[str, Any] = output_ids.shape[0]
if self.framework == "pt":
lowercase__ : Tuple = output_ids.reshape(__lowerCAmelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase__ : List[str] = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.TEXT , __lowerCAmelCase=False ) -> List[Any]:
lowercase__ : Optional[Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase__ : Optional[Any] = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase__ : List[str] = {
F"""{self.return_name}_text""": self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
}
records.append(__lowerCAmelCase )
return records
@add_end_docstrings(a__ )
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "summary"
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return super().__call__(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool:
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(a__ )
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "translation"
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def _lowerCAmelCase( self , *__lowerCAmelCase , __lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> str:
if getattr(self.tokenizer , '''_build_translation_inputs''' , __lowerCAmelCase ):
return self.tokenizer._build_translation_inputs(
*__lowerCAmelCase , return_tensors=self.framework , truncation=__lowerCAmelCase , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase )
else:
return super()._parse_and_tokenize(*__lowerCAmelCase , truncation=__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ) -> Tuple:
lowercase__ , lowercase__ , lowercase__ : Any = super()._sanitize_parameters(**__lowerCAmelCase )
if src_lang is not None:
lowercase__ : Tuple = src_lang
if tgt_lang is not None:
lowercase__ : List[Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase__ : Union[str, Any] = kwargs.get('''task''' , self.task )
lowercase__ : Optional[Any] = task.split('''_''' )
if task and len(__lowerCAmelCase ) == 4:
# translation, XX, to YY
lowercase__ : List[Any] = items[1]
lowercase__ : List[str] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> int:
return super().__call__(*__lowerCAmelCase , **__lowerCAmelCase )
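# Hedged usage sketch of the pipelines defined above via the standard
# ``transformers.pipeline`` factory (the checkpoint names are public models
# chosen purely for illustration):
# from transformers import pipeline
# summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
# print(summarizer("A long article ...", min_length=5, max_length=40))
# translator = pipeline("translation_en_to_fr", model="t5-small")
# print(translator("How old are you?"))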
| 152
|
'''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : Optional[int] = 0
lowercase__ : int = len(UpperCAmelCase )
for i in range(n - 1 ):
for j in range(i + 1 , UpperCAmelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def __UpperCamelCase ( UpperCAmelCase ):
if len(UpperCAmelCase ) <= 1:
return arr, 0
lowercase__ : List[str] = len(UpperCAmelCase ) // 2
lowercase__ : Optional[Any] = arr[0:mid]
lowercase__ : Any = arr[mid:]
lowercase__ , lowercase__ : Any = count_inversions_recursive(UpperCAmelCase )
lowercase__ , lowercase__ : List[str] = count_inversions_recursive(UpperCAmelCase )
lowercase__ , lowercase__ : Optional[Any] = _count_cross_inversions(UpperCAmelCase , UpperCAmelCase )
lowercase__ : Optional[Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Union[str, Any] = []
lowercase__ : List[Any] = 0
while i < len(UpperCAmelCase ) and j < len(UpperCAmelCase ):
if p[i] > q[j]:
# if p[i] > q[j], then p[k] > q[j] for every i <= k < len(p).
# These are all inversions; the claim follows from the
# property that p is sorted.
num_inversion += len(UpperCAmelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(UpperCAmelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def __UpperCamelCase ( ):
lowercase__ : str = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
lowercase__ : Dict = count_inversions_bf(UpperCAmelCase )
lowercase__ , lowercase__ : Union[str, Any] = count_inversions_recursive(UpperCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , UpperCAmelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
lowercase__ : Optional[int] = count_inversions_bf(UpperCAmelCase )
lowercase__ , lowercase__ : int = count_inversions_recursive(UpperCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , UpperCAmelCase )
# an empty list should also have zero inversions
lowercase__ : Optional[Any] = []
lowercase__ : Any = count_inversions_bf(UpperCAmelCase )
lowercase__ , lowercase__ : List[Any] = count_inversions_recursive(UpperCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , UpperCAmelCase )
if __name__ == "__main__":
main()
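# Worked example (function names follow the calls inside main() above):
# the array [3, 1, 2] contains exactly two inversions, (3, 1) and (3, 2).
#   count_inversions_bf([3, 1, 2])         -> 2                # O(n^2) brute force
#   count_inversions_recursive([3, 1, 2])  -> ([1, 2, 3], 2)   # O(n log n) divide and conquer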
| 152
| 1
|
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : str ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] =[int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
# a valid IPv4 address has exactly four octets, each in the range 0-255
return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = input().strip()
__SCREAMING_SNAKE_CASE = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
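# Hedged sanity checks (assuming the conventional 0-255 octet range; the
# function name follows the call inside the __main__ guard above):
#   is_ip_va_address_valid("192.168.0.1")      -> True
#   is_ip_va_address_valid("255.255.255.255")  -> True
#   is_ip_va_address_valid("256.1.1.1")        -> False
#   is_ip_va_address_valid("1.2.3")            -> False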
| 153
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_lowercase = Features({'text': Value('string' )} )
_lowercase = Features({'labels': ClassLabel} )
_lowercase = "text"
_lowercase = "labels"
def __lowerCamelCase ( self , __UpperCAmelCase ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , __UpperCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =copy.deepcopy(self )
SCREAMING_SNAKE_CASE_ : List[str] =self.label_schema.copy()
SCREAMING_SNAKE_CASE_ : Tuple =features[self.label_column]
SCREAMING_SNAKE_CASE_ : str =label_schema
return task_template
@property
def __lowerCamelCase ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
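# Hedged usage sketch, mirroring ``datasets.tasks.TextClassification`` (the
# un-obfuscated counterpart of the template above):
# from datasets import ClassLabel, Features, Value
# from datasets.tasks import TextClassification
# task = TextClassification(text_column="text", label_column="labels")
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = task.align_with_features(features)  # copies the dataset's ClassLabel into the label schema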
| 153
| 1
|
import baseaa
def __lowerCamelCase ( A__ : str ) -> bytes:
return baseaa.aaaencode(string.encode("""utf-8""" ) )
def __lowerCamelCase ( A__ : bytes ) -> str:
return baseaa.aaadecode(A__ ).decode("""utf-8""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
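# Reference round-trip with the stdlib, under the assumption that
# ``baseaa.aaaencode``/``baseaa.aaadecode`` above stand in for the
# ``base64`` module's Ascii85 codec:
import base64

_payload = "some utf-8 text"
_encoded = base64.a85encode(_payload.encode("utf-8"))
assert base64.a85decode(_encoded).decode("utf-8") == _payload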
| 278
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __lowerCamelCase ( A__ : int ) -> List[str]:
lowerCamelCase_ : int = 384
if "tiny" in model_name:
lowerCamelCase_ : str = [3, 3, 9, 3]
lowerCamelCase_ : List[str] = [96, 192, 384, 768]
if "small" in model_name:
lowerCamelCase_ : Any = [3, 3, 27, 3]
lowerCamelCase_ : List[Any] = [96, 192, 384, 768]
if "base" in model_name:
lowerCamelCase_ : Optional[Any] = [3, 3, 27, 3]
lowerCamelCase_ : int = [128, 256, 512, 1024]
lowerCamelCase_ : int = 512
if "large" in model_name:
lowerCamelCase_ : Union[str, Any] = [3, 3, 27, 3]
lowerCamelCase_ : Dict = [192, 384, 768, 1536]
lowerCamelCase_ : Union[str, Any] = 768
if "xlarge" in model_name:
lowerCamelCase_ : Union[str, Any] = [3, 3, 27, 3]
lowerCamelCase_ : Optional[int] = [256, 512, 1024, 2048]
lowerCamelCase_ : List[str] = 1024
# set label information
lowerCamelCase_ : str = 150
lowerCamelCase_ : List[Any] = """huggingface/label-files"""
lowerCamelCase_ : int = """ade20k-id2label.json"""
lowerCamelCase_ : List[Any] = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase_ : Optional[Any] = {int(A__ ): v for k, v in idalabel.items()}
lowerCamelCase_ : int = {v: k for k, v in idalabel.items()}
lowerCamelCase_ : Dict = ConvNextConfig(
depths=A__ , hidden_sizes=A__ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
lowerCamelCase_ : str = UperNetConfig(
backbone_config=A__ , auxiliary_in_channels=A__ , num_labels=A__ , idalabel=A__ , labelaid=A__ , )
return config
def __lowerCamelCase ( A__ : Optional[Any] ) -> Optional[Any]:
lowerCamelCase_ : Tuple = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __lowerCamelCase ( A__ : Optional[Any] , A__ : List[Any] , A__ : int ) -> Union[str, Any]:
lowerCamelCase_ : Dict = dct.pop(A__ )
lowerCamelCase_ : int = val
def __lowerCamelCase ( A__ : Tuple , A__ : str , A__ : Any ) -> Optional[int]:
lowerCamelCase_ : int = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
lowerCamelCase_ : List[str] = model_name_to_url[model_name]
lowerCamelCase_ : Dict = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )["""state_dict"""]
lowerCamelCase_ : List[Any] = get_upernet_config(A__ )
lowerCamelCase_ : Any = UperNetForSemanticSegmentation(A__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowerCamelCase_ : Tuple = state_dict.pop(A__ )
if "bn" in key:
lowerCamelCase_ : Dict = key.replace("""bn""" , """batch_norm""" )
lowerCamelCase_ : Tuple = val
# rename keys
lowerCamelCase_ : List[Any] = create_rename_keys(A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
model.load_state_dict(A__ )
# verify on image
lowerCamelCase_ : Any = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
lowerCamelCase_ : Optional[int] = Image.open(requests.get(A__ , stream=A__ ).raw ).convert("""RGB""" )
lowerCamelCase_ : List[str] = SegformerImageProcessor()
lowerCamelCase_ : Union[str, Any] = processor(A__ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
lowerCamelCase_ : List[str] = model(A__ )
if model_name == "upernet-convnext-tiny":
lowerCamelCase_ : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
lowerCamelCase_ : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
lowerCamelCase_ : str = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
lowerCamelCase_ : List[str] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
lowerCamelCase_ : Any = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , A__ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(A__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
snake_case__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
snake_case__ : List[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
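# Hypothetical invocation of this conversion script (the script filename and
# output path are placeholders):
#   python convert_upernet_original_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny \
#       --push_to_hub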
| 278
| 1
|
'''simple docstring'''
from collections import namedtuple
lowerCAmelCase__ = namedtuple("""from_to""", """from_ to""")
lowerCAmelCase__ = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.0_01, 1_0_0_0),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.0_04_54, 2_6_4.1_7_2),
"""cubicyard""": from_to(0.7_64_55, 1.3_07_95),
"""cubicfoot""": from_to(0.0_28, 35.31_47),
"""cup""": from_to(0.0_00_23_65_88, 4_2_2_6.7_5),
}
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> float:
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ ', '.join(METRIC_CONVERSION ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ', '.join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
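# Hedged sanity checks for the converter above (``lowerCamelCase_`` is the
# obfuscated name of the conversion function defined in this snippet):
assert lowerCamelCase_(1, "cubicmeter", "litre") == 1000
assert lowerCamelCase_(3, "kilolitre", "cubicmeter") == 3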
| 714
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) = config_and_inputs
_UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
return torch.tensor(
UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
| 648
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
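# Illustrative sketch (not part of the transformers source): the lazy-module pattern used
# above keeps `import` cheap by deferring the heavy framework imports until an exported
# name is first accessed. A minimal stand-in for `_LazyModule` could look like this:
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only called when normal attribute lookup fails, i.e. on the first access
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)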
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
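# Example invocation (assuming the `diffusers-cli` entry point is installed): running
# `diffusers-cli env` dispatches to the EnvironmentCommand registered above and prints
# environment information useful for bug reports.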
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Renames fairseq keys to HF names and pulls the enc-dec projection into its own dict."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
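# Worked example (hypothetical key, for illustration): a fused projection
#   "transformer.layers.0.self_attn.in_proj_weight"        shape (3 * hidden_size, hidden_size)
# is first renamed by rename_keys to
#   "model.decoder.layers.0.self_attn.in_proj_weight"
# and then split row-wise into q_proj.weight / k_proj.weight / v_proj.weight,
# each of shape (hidden_size, hidden_size).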
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
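# Example invocation (the script filename and output path below are assumptions,
# not taken from this source):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small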
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCamelCase_ , int(b / 2 ) ) * actual_power(UpperCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(UpperCamelCase_ , int(b / 2 ) ) * actual_power(UpperCamelCase_ , int(b / 2 ) )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if b < 0:
return 1 / actual_power(UpperCamelCase_ , UpperCamelCase_ )
return actual_power(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
print(power(-2, -3))
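
# Quick sanity check (hypothetical addition, not part of the original script):
# exponentiation by squaring must agree with Python's built-in ** operator.
if __name__ == "__main__":
    for base in (-2, 3):
        for exp in (-3, 0, 4):
            assert power(base, exp) == base**exp, (base, exp)
    print("all power checks passed")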
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random NumPy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a UniSpeechSat model."""

    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320,
        num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
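# Usage sketch (illustrative): with the defaults above, the convolutional feature
# extractor downsamples raw audio by the product of the conv strides, which the
# property exposes:
#   UniSpeechSatConfig().inputs_to_logits_ratio == 5 * 2**6 == 320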
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
snake_case_ = parser.parse_args()
if args.model_type == "roberta":
snake_case_ = RobertaForMaskedLM.from_pretrained(args.model_name)
snake_case_ = 'roberta'
elif args.model_type == "gpt2":
snake_case_ = GPTaLMHeadModel.from_pretrained(args.model_name)
snake_case_ = 'transformer'
snake_case_ = model.state_dict()
snake_case_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
snake_case_ = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
snake_case_ = F'''{prefix}.embeddings.{w}.weight'''
snake_case_ = state_dict[param_name]
for w in ["weight", "bias"]:
snake_case_ = F'''{prefix}.embeddings.LayerNorm.{w}'''
snake_case_ = state_dict[param_name]
# Transformer Blocks #
snake_case_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
snake_case_ = state_dict[
F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
snake_case_ = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
snake_case_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
snake_case_ = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
snake_case_ = state_dict[F'''lm_head.dense.{w}''']
snake_case_ = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
snake_case_ = state_dict[F'''{prefix}.ln_f.{w}''']
snake_case_ = state_dict['lm_head.weight']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
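# Worked mapping (for the RoBERTa branch above): teacher layers [0, 2, 4, 7, 9, 11]
# are copied into student layers [0, 1, 2, 3, 4, 5], e.g.
#   roberta.encoder.layer.7.attention.self.query.weight
#     -> roberta.encoder.layer.3.attention.self.query.weight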
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False,
            bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache, position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
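# Deterministic sanity check (hypothetical addition, mirroring the usual doctests):
assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]
assert patience_sort([]) == []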
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
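# The call above returns a list of (filled_sentence, probability, token) triples for the
# top-k predictions; the exact tokens depend on the downloaded checkpoint.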
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list used for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # zero-weight edges go to the front of the deque, unit-weight edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
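
# Hypothetical usage sketch: 0-1 BFS settles every vertex at its final distance, like
# Dijkstra, but in O(V + E) because the deque replaces the priority queue.
if __name__ == "__main__":
    graph = AdjacencyList(5)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 1)
    graph.add_edge(0, 3, 1)
    graph.add_edge(3, 4, 1)
    graph.add_edge(2, 4, 0)
    print(graph.get_shortest_path(0, 4))  # -> 1, via 0 -> 1 -> 2 -> 4 (weights 0 + 1 + 0)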