import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """Wraps a dataset so that `process` (the pipeline's preprocess function) is applied lazily per item."""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    """Applies `infer` on each item of `loader`, unrolling batched outputs item by item."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating batch unrolling altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the item located at `_loader_batch_index` within the current `_loader_batch_data`."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that gets passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within that batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be the last batch so we can't unroll as many elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    """Flattens an iterator whose `infer` yields sub-iterators (lists of lists, but with generators)."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator is None when we haven't started a `preprocess` iterator yet, so start one
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return the next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it: we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    """Regroups flattened items back into lists, using the `is_last` marker as the boundary."""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be the last batch so we can't unroll as many elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    """Exposes a single column of `dataset` (e.g. the "text" field) as a Dataset."""

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    """Exposes two columns of `dataset` as the `text`/`text_pair` inputs expected by text pipelines."""

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
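# The classes above mirror the pipeline data utilities in transformers
# (pipelines/pt_utils.py). A minimal usage sketch, assuming `transformers`
# and `datasets` are installed; the dataset name, pipeline task, and batch
# size below are illustrative only:
#
#     from datasets import load_dataset
#     from transformers import pipeline
#     from transformers.pipelines.pt_utils import KeyDataset
#
#     dataset = load_dataset("imdb", split="test")
#     classifier = pipeline("sentiment-analysis")
#     # KeyDataset streams one column; the pipeline wraps the result in the
#     # iterator classes above so batched outputs are unrolled item by item.
#     for output in classifier(KeyDataset(dataset, "text"), batch_size=8):
#         print(output)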
"""Token classification tasks (NER, chunking, POS tagging) built on utils_ner."""
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 data, the chunk column is the second-to-last one
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
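# Usage sketch (hypothetical paths): `data_dir` is assumed to contain a
# CoNLL-style train.txt ("token label" per line, blank lines between
# sentences). The Split/str duality of `mode` is handled above.
#
#     task = NER()
#     labels = task.get_labels(path=None)  # falls back to the CoNLL-2003 label set
#     examples = task.read_examples_from_file("data_dir", "train")
#     print(f"{len(examples)} examples, {len(labels)} labels")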
"""Fast tokenizer class for DistilBERT (backed by HuggingFace's tokenizers library)."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the saved normalizer disagrees with the arguments, rebuild it so
        # the backend tokenizer matches what the caller asked for.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
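# Usage sketch for the fast tokenizer defined above (downloads the
# distilbert-base-uncased vocab from the Hub on first use):
#
#     from transformers import DistilBertTokenizerFast
#
#     tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     enc = tokenizer("Hello world", "a second sentence")
#     print(enc["input_ids"])  # [CLS] ... [SEP] ... [SEP], built as above
#     # Note: model_input_names excludes token_type_ids for DistilBERT.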
"""Official evaluation script for SQuAD version 2.0."""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, the only correct answer is the empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
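# Quick sanity check of the metrics above (runnable after the definitions;
# normalize_answer strips case, punctuation, and articles):
#
#     print(compute_exact("The Cat!", "cat"))                    # 1
#     print(round(compute_f1("black cat", "the black dog"), 2))  # 0.5, one shared token
#
# Typical invocation (hypothetical file names):
#
#     python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#         --na-prob-file na_probs.json --out-file eval.json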
"""Convert a decimal (base-10) number to its hexadecimal (base-16) string representation."""
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal):
    """
    Take an integer-valued decimal and return its hexadecimal representation as a string.

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
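# Spot checks consistent with the loop above (note that 0 would yield the
# bare prefix "0x", since the while loop never executes):
#
#     print(decimal_to_hexadecimal(5))     # 0x5
#     print(decimal_to_hexadecimal(255))   # 0xff
#     print(decimal_to_hexadecimal(-256))  # -0x100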
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + e^-x) elementwise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Apply the sigmoid linear unit (SiLU/swish), x * sigmoid(x), elementwise."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
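# Quick numeric check: sigmoid(0) = 0.5, so the SiLU of 0 is 0.
#
#     x = np.array([-1.0, 0.0, 1.0])
#     print(sigmoid(x))              # [0.26894142 0.5        0.73105858]
#     print(sigmoid_linear_unit(x))  # [-0.26894142  0.          0.73105858]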
"""PEGASUS model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
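# Usage sketch: build a deliberately tiny configuration; the attribute_map
# above makes `hidden_size` and `num_attention_heads` resolve to `d_model`
# and `encoder_attention_heads`. The sizes here are illustrative only.
#
#     config = PegasusConfig(
#         d_model=64, encoder_layers=2, decoder_layers=2,
#         encoder_attention_heads=2, decoder_attention_heads=2,
#         encoder_ffn_dim=128, decoder_ffn_dim=128,
#     )
#     print(config.hidden_size, config.num_attention_heads)  # 64 2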
"""Convert Swinv2 checkpoints from the timm library to the Hugging Face format."""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification


def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # Split the fused timm qkv projection into separate query/key/value tensors.
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    # Sanity-check that the converted model reproduces the timm logits.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
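# Example invocation, assuming this script is saved as
# convert_swinv2_timm_to_pytorch.py and network access is available so timm
# can download the pretrained weights (output path is illustrative):
#
#     python convert_swinv2_timm_to_pytorch.py \
#         --swinv2_name swinv2_tiny_patch4_window8_256 \
#         --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256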
"""Tests for Blip2Processor (an image processor + tokenizer wrapper)."""
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image (channels-first array moved to channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowerCAmelCase_ = True
from torch.cuda.amp import autocast
lowerCAmelCase_ = logging.getLogger(__name__)
def A__ ( A : str=None , A : Union[str, Any]=None):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=A)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
__SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
__SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={
'''help''': '''The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
__SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={'''help''': '''The dropout probabilitiy for all 1D convolutional layers in feature extractor.'''} , )
__SCREAMING_SNAKE_CASE = field(
default=0.05 , metadata={
'''help''': (
'''Propability of each feature vector along the time axis to be chosen as the start of the vector'''
'''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
'''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
)
} , )
__SCREAMING_SNAKE_CASE = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__SCREAMING_SNAKE_CASE = field(
default='''train+validation''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
__SCREAMING_SNAKE_CASE = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __call__( self , lowerCamelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
UpperCamelCase : Dict = [{"input_values": feature["input_values"]} for feature in features]
UpperCamelCase : str = [{"input_ids": feature["labels"]} for feature in features]
UpperCamelCase : Optional[Any] = self.processor.pad(
lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
UpperCamelCase : Union[str, Any] = self.processor.pad(
labels=lowerCamelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
# replace padding with -100 to ignore loss correctly
UpperCamelCase : int = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
UpperCamelCase : Any = labels
return batch
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase ) -> torch.Tensor:
'''simple docstring'''
model.train()
UpperCamelCase : List[Any] = self._prepare_inputs(lowerCamelCase )
if self.use_amp:
with autocast():
UpperCamelCase : Union[str, Any] = self.compute_loss(lowerCamelCase , lowerCamelCase )
else:
UpperCamelCase : List[str] = self.compute_loss(lowerCamelCase , lowerCamelCase )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
UpperCamelCase : Optional[Any] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
UpperCamelCase : str = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
UpperCamelCase : List[Any] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCamelCase ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCamelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCamelCase )
else:
loss.backward()
return loss.detach()
def A__ ( ):
'''simple docstring'''
UpperCamelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCamelCase : Any = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase : Dict = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''')
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , A)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets:
UpperCamelCase : Union[str, Any] = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name)
UpperCamelCase : Optional[int] = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test")
# Create and save tokenizer
UpperCamelCase : List[str] = F'''[{''.join(data_args.chars_to_ignore)}]'''
def remove_special_characters(A : List[Any]):
UpperCamelCase : Optional[int] = re.sub(A , "" , batch["sentence"]).lower() + " "
return batch
UpperCamelCase : Any = train_dataset.map(A , remove_columns=["sentence"])
UpperCamelCase : int = eval_dataset.map(A , remove_columns=["sentence"])
def extract_all_chars(A : Union[str, Any]):
UpperCamelCase : Tuple = " ".join(batch["text"])
UpperCamelCase : Optional[Any] = list(set(A))
return {"vocab": [vocab], "all_text": [all_text]}
UpperCamelCase : Tuple = train_dataset.map(
A , batched=A , batch_size=-1 , keep_in_memory=A , remove_columns=train_dataset.column_names , )
UpperCamelCase : Optional[Any] = train_dataset.map(
A , batched=A , batch_size=-1 , keep_in_memory=A , remove_columns=eval_dataset.column_names , )
UpperCamelCase : Dict = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
UpperCamelCase : Tuple = {v: k for k, v in enumerate(A)}
UpperCamelCase : Tuple = vocab_dict[" "]
del vocab_dict[" "]
UpperCamelCase : List[str] = len(A)
UpperCamelCase : Dict = len(A)
with open("vocab.json" , "w") as vocab_file:
json.dump(A , A)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase : int = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
UpperCamelCase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=A , return_attention_mask=A)
UpperCamelCase : int = WavaVecaProcessor(feature_extractor=A , tokenizer=A)
UpperCamelCase : str = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer) , )
if data_args.max_train_samples is not None:
UpperCamelCase : Union[str, Any] = min(len(A) , data_args.max_train_samples)
UpperCamelCase : int = train_dataset.select(range(A))
if data_args.max_val_samples is not None:
UpperCamelCase : Dict = eval_dataset.select(range(data_args.max_val_samples))
UpperCamelCase : Union[str, Any] = torchaudio.transforms.Resample(4_80_00 , 1_60_00)
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(A : Union[str, Any]):
UpperCamelCase , UpperCamelCase : List[str] = torchaudio.load(batch["path"])
UpperCamelCase : List[str] = resampler(A).squeeze().numpy()
UpperCamelCase : Dict = 1_60_00
UpperCamelCase : str = batch["text"]
return batch
UpperCamelCase : int = train_dataset.map(
A , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
UpperCamelCase : int = eval_dataset.map(
A , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(A : Dict):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"])) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
UpperCamelCase : Union[str, Any] = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0])
batch.update(A)
return batch
UpperCamelCase : str = train_dataset.map(
A , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A , num_proc=data_args.preprocessing_num_workers , )
UpperCamelCase : Union[str, Any] = eval_dataset.map(
A , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A , num_proc=data_args.preprocessing_num_workers , )
# Metric
UpperCamelCase : Tuple = datasets.load_metric("wer")
def compute_metrics(A : int):
UpperCamelCase : Union[str, Any] = pred.predictions
UpperCamelCase : Tuple = np.argmax(A , axis=-1)
UpperCamelCase : int = processor.tokenizer.pad_token_id
UpperCamelCase : Union[str, Any] = processor.batch_decode(A)
# we do not want to group tokens when computing the metrics
UpperCamelCase : List[Any] = processor.batch_decode(pred.label_ids , group_tokens=A)
UpperCamelCase : Optional[Any] = wer_metric.compute(predictions=A , references=A)
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
UpperCamelCase : Dict = DataCollatorCTCWithPadding(processor=A , padding=A)
# Initialize our Trainer
UpperCamelCase : int = CTCTrainer(
model=A , data_collator=A , args=A , compute_metrics=A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCamelCase : List[Any] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path):
UpperCamelCase : Tuple = model_args.model_name_or_path
else:
UpperCamelCase : str = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank):
processor.save_pretrained(training_args.output_dir)
UpperCamelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=A)
trainer.save_model()
UpperCamelCase : int = train_result.metrics
UpperCamelCase : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A)
)
UpperCamelCase : int = min(A , len(A))
trainer.log_metrics("train" , A)
trainer.save_metrics("train" , A)
trainer.save_state()
# Evaluation
UpperCamelCase : int = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
UpperCamelCase : Optional[Any] = trainer.evaluate()
UpperCamelCase : int = data_args.max_val_samples if data_args.max_val_samples is not None else len(A)
UpperCamelCase : Dict = min(A , len(A))
trainer.log_metrics("eval" , A)
trainer.save_metrics("eval" , A)
return results
if __name__ == "__main__":
    main()
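# A toy illustration (separate from the training script above) of the WER
# metric used in compute_metrics: "hello word" vs "hello world" is one
# substitution over two reference words, i.e. a WER of 0.5.
def _demo_wer():
    import datasets

    wer_metric = datasets.load_metric("wer")
    wer = wer_metric.compute(predictions=["hello word"], references=["hello world"])
    print(f"wer = {wer}")  # expected: 0.5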
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a__ ( A__ ):
@staticmethod
@abstractmethod
def lowerCamelCase_ ( _lowerCamelCase :ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
raise NotImplementedError()
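# A minimal sketch of a concrete subclass illustrating the register/run
# contract above. The command name `greet` and the `--name` flag are
# hypothetical, chosen only for the example.
class GreetCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the subparsers action handed in by the CLI entry point
        greet_parser = parser.add_parser("greet")
        greet_parser.add_argument("--name", type=str, default="world")
        greet_parser.set_defaults(func=lambda args: GreetCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")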
"""simple docstring"""
from __future__ import annotations
def A_ ( __lowercase ):
UpperCamelCase_ : List[Any] =0.00
UpperCamelCase_ : Dict =0
for resistor in resistors:
if resistor <= 0:
UpperCamelCase_ : List[str] =F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__lowercase )
first_sum += 1 / float(__lowercase )
index += 1
return 1 / first_sum
def A_ ( __lowercase ):
UpperCamelCase_ : Union[str, Any] =0.00
UpperCamelCase_ : Optional[int] =0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCamelCase_ : Tuple =F'''Resistor at index {index} has a negative value!'''
raise ValueError(__lowercase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
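# A quick sanity check of the two helpers above (not part of the original
# file): two 10-ohm resistors give 5 ohms in parallel and 20 ohms in series.
if __name__ == "__main__":
    assert abs(resistor_parallel([10.0, 10.0]) - 5.0) < 1e-9
    assert abs(resistor_series([10.0, 10.0]) - 20.0) < 1e-9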
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        # no path was found: fall back to the start position
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        # no meeting point was found: fall back to the forward start position
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_video():
    video_path = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_path)
    return list(video)
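# A self-contained sketch (not part of the conversion script) of the qkv
# splitting convention used in convert_state_dict above: a fused projection of
# shape (3 * dim, dim) is sliced row-wise into query, key and value weights.
def _demo_qkv_split():
    dim = 4
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)
    # rows 0..dim-1 -> query, dim..2*dim-1 -> key, 2*dim..3*dim-1 -> value
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv)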
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating `BertConfig` with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
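# A minimal usage sketch for the config above: instantiate with defaults and
# override only the pruning hyper-parameters.
if __name__ == "__main__":
    config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    print(config.model_type, config.hidden_size, config.pruning_method)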
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Generate random points in the unit square and use the fraction that
    lands inside the unit circle to estimate pi."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: the mean sampled function value times the interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator on y = x, whose exact integral is (b^2 - a^2) / 2."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under y = sqrt(4 - x^2) on [0, 2], a quarter
    circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
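# A quick illustration of the estimators above (not part of the original
# file). Monte Carlo error shrinks roughly like 1/sqrt(n), so 10_000 samples
# typically recover pi to about one decimal place.
if __name__ == "__main__":
    pi_estimator(10_000)
    pi_estimator_using_area_under_curve(10_000)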
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
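# A small sketch (not part of the conversion script) of how the "*" wildcard
# in MAPPING is resolved inside recursively_load_weights: the layer index is
# read out of the fairseq parameter name and substituted into the HF key.
def _demo_wildcard_mapping():
    name = "encoder.layers.3.self_attn.k_proj.weight"
    key = "self_attn.k_proj"
    mapped_key = "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    print(mapped_key.replace("*", layer_index))  # encoder.layers.3.attention.k_proj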
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Fallback stub so the module imports when vision dependencies are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
@require_torch
@slow
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = 'facebook/detr-resnet-50'
_UpperCAmelCase = AutoModelForObjectDetection.from_pretrained(snake_case )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(snake_case )
_UpperCAmelCase = ObjectDetectionPipeline(model=snake_case , feature_extractor=snake_case )
_UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
_UpperCAmelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = 'facebook/detr-resnet-50'
_UpperCAmelCase = pipeline('object-detection' , model=snake_case )
_UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
_UpperCAmelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = 0.9985
_UpperCAmelCase = 'facebook/detr-resnet-50'
_UpperCAmelCase = pipeline('object-detection' , model=snake_case )
_UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=snake_case )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = 'Narsil/layoutlmv3-finetuned-funsd'
_UpperCAmelCase = 0.9993
_UpperCAmelCase = pipeline('object-detection' , model=snake_case , threshold=snake_case )
_UpperCAmelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
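# A minimal usage sketch of the pipeline exercised by the tests above; the
# checkpoint name and threshold mirror the slow tests, so running this
# downloads the full facebook/detr-resnet-50 model.
def _demo_object_detection():
    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
    for det in detections:
        print(det["label"], det["score"], det["box"])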
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # only record leaf modules (or conv/batchnorm layers, which carry weights)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
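# A self-contained sketch (independent of the timm/ResNet specifics below) of
# how Tracker + ModuleTransfer copy weights between two architecturally
# identical modules by tracing the leaf layers hit during a forward pass.
def _demo_module_transfer():
    src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 16, 16))
    assert torch.allclose(src[0].weight, dest[0].weight)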
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class a__ ( UpperCamelCase_ , UpperCamelCase_ ):
snake_case__ = 1
@register_to_config
def __init__( self : Union[str, Any] ,a__ : List[str]=2000 ,a__ : Tuple=0.1 ,a__ : Union[str, Any]=20 ,a__ : int=1E-3) -> Any:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = None
_lowerCAmelCase:List[Any] = None
_lowerCAmelCase:Dict = None
def __UpperCamelCase ( self : int ,a__ : Any ,a__ : Union[str, torch.device] = None) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:str = torch.linspace(1 ,self.config.sampling_eps ,a__ ,device=a__)
def __UpperCamelCase ( self : Union[str, Any] ,a__ : str ,a__ : List[Any] ,a__ : Union[str, Any] ,a__ : Any=None) -> Tuple:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''')
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
_lowerCAmelCase:Union[str, Any] = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
_lowerCAmelCase:Dict = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
_lowerCAmelCase:Optional[Any] = std.flatten()
while len(std.shape) < len(score.shape):
_lowerCAmelCase:str = std.unsqueeze(-1)
_lowerCAmelCase:Optional[int] = -score / std
# compute
_lowerCAmelCase:Optional[Any] = -1.0 / len(self.timesteps)
_lowerCAmelCase:Union[str, Any] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
_lowerCAmelCase:Any = beta_t.flatten()
while len(beta_t.shape) < len(x.shape):
_lowerCAmelCase:List[str] = beta_t.unsqueeze(-1)
_lowerCAmelCase:Tuple = -0.5 * beta_t * x
_lowerCAmelCase:str = torch.sqrt(a__)
_lowerCAmelCase:Union[str, Any] = drift - diffusion**2 * score
_lowerCAmelCase:List[str] = x + drift * dt
# add noise
_lowerCAmelCase:Any = randn_tensor(x.shape ,layout=x.layout ,generator=a__ ,device=x.device ,dtype=x.dtype)
_lowerCAmelCase:Union[str, Any] = x_mean + diffusion * math.sqrt(-dt) * noise
return x, x_mean
def __len__( self : int) -> Tuple:
"""simple docstring"""
return self.config.num_train_timesteps
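# A minimal usage sketch for the scheduler above, assuming this module can be
# imported inside a diffusers-style package (it uses relative imports). The
# zero tensor stands in for a real score model's output at each step.
def _demo_sde_vp_step():
    scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
    scheduler.set_timesteps(num_inference_steps=10)
    x = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        score = torch.zeros_like(x)  # placeholder for model(x, t)
        x, x_mean = scheduler.step_pred(score, x, t)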
def catalan(number: int) -> int:
    """
    Return the nth Catalan number (1-indexed: catalan(1) == 1), computed
    iteratively via C(n) = C(n-1) * (4n - 2) / (n + 1).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
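# A quick check of the function above against the closed form
# C(n) = binom(2n, n) / (n + 1): the 5th Catalan number is 14.
if __name__ == "__main__":
    from math import comb

    assert catalan(5) == 14 == comb(8, 4) // 5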
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
def __A ( self ):
A__ : Tuple = [self._time_spent]
A__ : Tuple = 0
for time in time_spent:
A__ : Dict = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(A__ ) == 1:
A__ : Dict = [0, 0, time_parts[0]]
A__ , A__ , A__ : Optional[Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
A__ , A__ , A__ : List[str] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F"""{int(A__ )}h{int(A__ )}m{int(A__ )}s"""
@property
def __A ( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __A ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __A ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHub_RUN_ID'.upper()]}",
                },
            }
        ]
        print("Sending the following payload")
        # `payload` is already a Python list, so it is dumped directly
        # (calling json.loads on it, as before, would fail).
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
A_ : str = get_job_links()
A_ : Dict = retrieve_available_artifacts()
A_ : int = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
A_ : int = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
A_ : Optional[Any] = github_actions_job_links.get('run_doctests')
A_ : str = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
A_ : List[Any] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
A_ , A_ , A_ : Any = handle_test_results(artifact['stats'])
A_ : Union[str, Any] = failed
A_ : int = success
A_ : Optional[Any] = time_spent[1:-1] + ', '
A_ : Optional[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
A_ : Dict = line.replace('FAILED ', '')
A_ : Dict = line.split()[0].replace('\n', '')
if "::" in line:
A_ , A_ : Dict = line.split('::')
else:
A_ , A_ : Dict = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
A_ : List[str] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
A_ : Optional[int] = all_failures[test] if test in all_failures else 'N/A'
A_ : List[str] = failure
break
A_ : Optional[Any] = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 456
| 1
|
"""simple docstring"""
from functools import lru_cache
@lru_cache
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
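    # Illustrative usage sketch (added): @lru_cache memoizes every intermediate
    # value, so repeated calls are cache lookups instead of fresh recursions.
    print(factorial(10))  # 3628800
    print(factorial.cache_info())  # hits/misses show the memoization at work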
| 285
|
"""simple docstring"""
def A_ ( _lowerCAmelCase : list, _lowerCAmelCase : list, _lowerCAmelCase : int ):
"""simple docstring"""
_a = len(_lowerCAmelCase )
_a = [[0] * n for i in range(_lowerCAmelCase )]
for i in range(_lowerCAmelCase ):
_a = y_points[i]
for i in range(2, _lowerCAmelCase ):
for j in range(_lowerCAmelCase, _lowerCAmelCase ):
_a = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
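    # Illustrative check (added): with five samples of y = x**2 the interpolant
    # is exact, so the value at x0 = 2.5 is 2.5**2 = 6.25.
    value, _table = neville_interpolate([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 2.5)
    print(value)  # 6.25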
| 285
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
A__ : Any = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
A__ : Tuple = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
A__ : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
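# Illustrative sanity check (added): calling sklearn directly reproduces
# Example 1 from the docstring above.
if __name__ == "__main__":
    print(round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2))  # 0.54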
| 13
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """The BetterTransformer conversion and its reversal should round-trip."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """save_pretrained must raise in BetterTransformer mode and work once reversed."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
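# Minimal usage sketch (added, illustrative of the round trip tested above):
#     model = model.to_bettertransformer()       # swap in fused attention modules
#     out = model.generate(**inputs)
#     model = model.reverse_bettertransformer()  # restore the canonical layout
#     model.save_pretrained(path)                # only the reversed model can be saved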
| 261
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 15
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Built-in voltage of a pn junction: V_bi = (kT/q) * ln(Nd * Na / ni^2)."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
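    # Worked example (added; typical silicon doping, concentrations in cm^-3):
    # V_bi = (kT/q) * ln(Nd * Na / ni^2) ≈ 0.0259 V * ln(1e34 / 2.25e20) ≈ 0.81 V
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))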
| 15
| 1
|
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2): for each element, scan the rest of the list for a greater one."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same quadratic idea, written with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n): traverse right to left keeping a monotonic stack of candidates."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
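    # Illustrative check (added): all three implementations agree with the
    # expected output declared at the top of the file.
    assert next_greatest_element_slow(arr) == next_greatest_element_fast(arr) == expect
    assert next_greatest_element(arr) == expect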
| 164
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a ring of pre-allocated, doubly linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
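    # Illustrative usage sketch (added): enqueue advances the rear pointer
    # around the ring, dequeue advances the front pointer.
    queue = CircularQueueLinkedList(3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # a
    print(queue.dequeue())  # a
    print(queue.dequeue())  # b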
| 164
| 1
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
A : int = {"""input_ids""": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 520
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: take items in decreasing profit/weight ratio."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach the max limit, e.g. 15 kg, and i < length
    while limit <= max_weight and i < length:
        # pick the greatest remaining ratio and mark it as used
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Add the whole profit for the item (fraction taken is
            # weight[index] / weight[index] == 1)
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
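    # Worked example (added): profits [60, 100, 120], weights [10, 20, 30] and
    # capacity 50 -> take items 1 and 2 whole, then 20/30 of item 3:
    # 60 + 100 + (20 / 30) * 120 == 240.0
    assert calc_profit([60, 100, 120], [10, 20, 30], 50) == 240.0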
| 520
| 1
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
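# Minimal usage sketch (added, illustrative of the API exercised above):
#     trainer = Trainer(model, args, callbacks=[MyTestTrainerCallback()])
#     trainer.add_callback(PrinterCallback)         # register by class or instance
#     cb = trainer.pop_callback(PrinterCallback)    # detach and get the instance back
#     trainer.remove_callback(DefaultFlowCallback)  # drop one of the default callbacks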
| 290
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
SCREAMING_SNAKE_CASE__ = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 267
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 706
|
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into a dict: node -> list of [neighbour, distance]."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All tours obtained by swapping two interior nodes, each tagged with its cost."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
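# Input format sketch (added; inferred from generate_neighbours above): one
# weighted edge per line as "<node> <node> <distance>", e.g. a file containing
#     a b 20
#     a c 18
#     b c 10
# Example run: python tabu_search.py -f tabu_data.txt -i 4 -s 3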
| 362
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 80
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # keep accepting the historical, misspelled kwarg for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 358
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 186
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 186
| 1
|
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
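    # Worked example (added; approximate values for water at 20 °C):
    # bulk modulus K ≈ 2.15e9 Pa, density rho ≈ 998 kg/m^3 -> c ≈ 1468 m/s
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))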
| 82
|
def odd_even_sort(input_list: list) -> list:
    """Brick (odd-even) sort: alternate passes over even- and odd-indexed pairs."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True

        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
print('''Enter list to be sorted''')
lowerCAmelCase : Union[str, Any] = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowerCAmelCase : int = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
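# Non-interactive usage sketch (assumed example data):
#
#   >>> odd_even_sort([5, 1, 4, 2, 3])
#   [1, 2, 3, 4, 5]
#
# Each while-loop iteration performs one even-index sweep and one odd-index
# sweep; the loop exits once a full pass completes with no swaps.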
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
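# Usage sketch: this builder is what backs `load_dataset("csv", ...)` in the
# `datasets` library; a minimal call (the file name is an assumption, not from
# this module) would be:
#
#   import datasets
#   ds = datasets.load_dataset("csv", data_files="train.csv", sep=";")
#
# Keyword arguments such as `sep` flow into CsvConfig and from there into
# `pd.read_csv` via the `pd_read_csv_kwargs` property above.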
from __future__ import annotations
import requests
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit; `age` can be "new", "top" or "hot"."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, holding the (pre-quantization) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
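# Minimal round-trip sketch (shapes and sizes are illustrative assumptions):
#
#   model = VQModel()  # defaults: 3 in/out channels, a single 64-channel block
#   x = torch.randn(1, 3, 32, 32)
#   reconstruction = model(x).sample  # encode -> quantize -> decode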
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
__UpperCAmelCase =parse_flag_from_env("RUN_SLOW", default=False)
__UpperCAmelCase =parse_flag_from_env("RUN_REMOTE", default=False)
__UpperCAmelCase =parse_flag_from_env("RUN_LOCAL", default=True)
__UpperCAmelCase =parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
__UpperCAmelCase =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
__UpperCAmelCase =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
__UpperCAmelCase =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
__UpperCAmelCase =pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
__UpperCAmelCase =pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
__UpperCAmelCase =pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
__UpperCAmelCase =pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
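# Usage sketch (the command below is an illustrative assumption): run a child
# process, stream its output live, and get the captured lines back.
#
#   result = execute_subprocess_async(["python", "-c", "print('hello')"])
#   assert result.returncode == 0 and "hello" in "\n".join(result.stdout)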
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
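# Usage sketch: the defaults above mirror the GPT-J-6B architecture, so
#
#   config = GPTJConfig()
#   assert config.n_embd == 4096 and config.n_layer == 28 and config.n_head == 16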
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
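# Worked example (assumed data): radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
# performs three passes (bucketing by the 1s, 10s, then 100s digits) and
# returns [2, 24, 45, 66, 75, 90, 170, 802].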
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return the start indices of every occurrence of `pattern` in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
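# Example (assumed data): in [1, 2, 2, 3, 3] both 2 and 3 appear twice,
# so mode([1, 2, 2, 3, 3]) returns [2, 3].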
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase : Dict = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
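# Worked example (assumed strings): "a cat sat" and "the cat sat" both
# normalize to "cat sat" (articles stripped), so f1_score returns 1.0.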
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 70
| 1
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the three headline counters from the Worldometers landing page."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
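# For reference, covid_stats() returns the three scraped counter strings, e.g.
# covid_data(cases='704,753,890', deaths='7,010,681', recovered='675,619,811')
# (the values above are illustrative only; the live page changes constantly).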
| 448
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
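# Example invocation (hypothetical script name and paths; flags as defined above):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt --dump_path ./sd15-diffusers --extract_ema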
| 448
| 1
|
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
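# Toy usage (the classic three-observation example; the numbers are illustrative):
# observations = ["normal", "cold", "dizzy"]
# states = ["healthy", "sick"]
# start_p = {"healthy": 0.6, "sick": 0.4}
# trans_p = {"healthy": {"healthy": 0.7, "sick": 0.3}, "sick": {"healthy": 0.4, "sick": 0.6}}
# emit_p = {
#     "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#     "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
# }
# viterbi(observations, states, start_p, trans_p, emit_p)  # -> ["healthy", "healthy", "sick"]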
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
| 585
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", "beit.embeddings.cls_token"),
(F"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(F"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
(F"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    # the destination keys below follow BEiT's attention naming in transformers
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr",
            commit_message="Add image processor", use_temp_dir=True)
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr",
            commit_message="Add model", use_temp_dir=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
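# Example invocation (hypothetical script name and output folder; the default
# --checkpoint_url above points at the DiT-base checkpoint):
#   python convert_dit_to_pytorch.py --pytorch_dump_folder_path ./dit-base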
| 308
| 0
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")
    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"])
    original_pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7")
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True)
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 702
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 40
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
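# Minimal usage sketch (assumes the usual transformers imports are available):
# from transformers import BertConfig
# config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
# assert config.decoder.is_decoder  # set by the classmethod above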
| 353
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2_048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
            bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
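# Illustrative usage (fetches the tokenizer files from the hub on first call):
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# tokenizer("Hello world").input_ids  # -> list of token ids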
| 420
| 0
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
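# Example: get_new_h_w(768, 768) -> (96, 96), since 768 // 8**2 == 12 exactly and 12 * 8 == 96;
# for a non-multiple such as 700, 700 // 64 == 10 with a remainder, so new_h becomes 11 and 11 * 8 == 88.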
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", truncation=True, max_length=77,
            return_attention_mask=True, add_special_tokens=True, return_tensors="pt")
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask)
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=77, truncation=True,
                return_attention_mask=True, add_special_tokens=True, return_tensors="pt")
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1)
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler)
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 703
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels,
            embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv,
            depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
    def test_attention_outputs(self):
        pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
    def test_keras_fit(self):
        super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 193
| 0
|
"""simple docstring"""
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 621
|
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0
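# Why this works (annotated sketch, not in the original file): in two's
# complement the sign bit survives XOR, so num1 ^ num2 is negative exactly when
# the two sign bits differ.
#   different_signs(1, -1)    # -> True
#   different_signs(1, 1)     # -> False
#   different_signs(-65, -5)  # -> False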
if __name__ == "__main__":
import doctest
doctest.testmod()
| 621
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class __UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
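# Hypothetical local invocation (the test file path is an assumption; the suite
# is gated by the TEST_SAGEMAKER flag checked in the skipif marker above):
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker/test_multi_node_model_parallel.py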
| 16
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
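# Minimal usage sketch (assumed public wiring: `Dataset.with_format` forwards
# extra kwargs such as `device` to this formatter):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#   ds[0]["x"]  # -> jax.Array, produced via format_row above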
| 16
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=9_9 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE="last" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ) -> int:
"""simple docstring"""
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_lengths
a__ = use_token_type_ids
a__ = use_labels
a__ = gelu_activation
a__ = sinusoidal_embeddings
a__ = causal
a__ = asm
a__ = n_langs
a__ = vocab_size
a__ = n_special
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = summary_type
a__ = use_proj
a__ = scope
def lowercase__ ( self ) -> str:
"""simple docstring"""
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_input_lengths:
a__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ = ids_tensor([self.batch_size] , 2 ).float()
a__ = ids_tensor([self.batch_size] , self.num_choices )
a__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self ) -> int:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
a__ = FlaubertModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , lengths=__SCREAMING_SNAKE_CASE , langs=__SCREAMING_SNAKE_CASE )
a__ = model(__SCREAMING_SNAKE_CASE , langs=__SCREAMING_SNAKE_CASE )
a__ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
a__ = FlaubertWithLMHeadModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
a__ = FlaubertForQuestionAnsweringSimple(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE )
a__ = model(__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
a__ = FlaubertForQuestionAnswering(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE )
a__ = model(
__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , cls_index=__SCREAMING_SNAKE_CASE , is_impossible=__SCREAMING_SNAKE_CASE , p_mask=__SCREAMING_SNAKE_CASE , )
a__ = model(
__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , cls_index=__SCREAMING_SNAKE_CASE , is_impossible=__SCREAMING_SNAKE_CASE , )
((a__) , ) = result_with_labels.to_tuple()
a__ = model(__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE )
((a__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
a__ = FlaubertForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE )
a__ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
a__ = self.num_labels
a__ = FlaubertForTokenClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
a__ = self.num_choices
a__ = FlaubertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__snake_case: Any = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case: Optional[Any] = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> str:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> int:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__SCREAMING_SNAKE_CASE )
@slow
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = FlaubertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
def lowercase__ ( self ) -> str:
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a__ = True
a__ = model_class(config=__SCREAMING_SNAKE_CASE )
a__ = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ = torch.jit.trace(
__SCREAMING_SNAKE_CASE , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , 'traced_model.pt' ) )
a__ = torch.jit.load(os.path.join(__SCREAMING_SNAKE_CASE , 'traced_model.pt' ) , map_location=__SCREAMING_SNAKE_CASE )
loaded(inputs_dict['input_ids'].to(__SCREAMING_SNAKE_CASE ) , inputs_dict['attention_mask'].to(__SCREAMING_SNAKE_CASE ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 273
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=9_9 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) -> int:
"""simple docstring"""
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ = ids_tensor([self.batch_size] , self.num_choices )
a__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self ) -> Any:
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
a__ = LlamaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
a__ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
a__ = True
a__ = LlamaModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
a__ = LlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
a__ = True
a__ = True
a__ = LlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , )
a__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a__ = torch.cat([input_ids, next_tokens] , dim=-1 )
a__ = torch.cat([input_mask, next_mask] , dim=-1 )
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['hidden_states'][0]
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['hidden_states'][0]
# select random slice
a__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a__ = output_from_no_past[:, -3:, random_slice_idx].detach()
a__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
__snake_case: Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__snake_case: Optional[int] = (LlamaForCausalLM,) if is_torch_available() else ()
__snake_case: List[Any] = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case: List[Any] = False
__snake_case: List[str] = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> int:
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = 3
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
a__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a__ = LlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = 3
a__ = 'single_label_classification'
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
a__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a__ = LlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self ) -> int:
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = 3
a__ = 'multi_label_classification'
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
a__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a__ = LlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = ids_tensor([1, 1_0] , config.vocab_size )
a__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
a__ = LlamaModel(__SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
original_model.eval()
a__ = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
a__ = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
a__ = {'type': scaling_type, 'factor': 10.0}
a__ = LlamaModel(__SCREAMING_SNAKE_CASE )
scaled_model.to(__SCREAMING_SNAKE_CASE )
scaled_model.eval()
a__ = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
a__ = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
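# For reference, the rope_scaling dict exercised above follows the LlamaConfig
# convention, e.g. config.rope_scaling = {"type": "linear", "factor": 10.0};
# "dynamic" scaling only changes the RoPE embeddings past the original maximum
# sequence length, which is why the short-input outputs still match there.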
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowercase__ ( self ) -> Any:
"""simple docstring"""
a__ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
a__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
a__ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
a__ = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , __SCREAMING_SNAKE_CASE , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a__ = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , __SCREAMING_SNAKE_CASE , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
a__ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
a__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
a__ = model(torch.tensor(__SCREAMING_SNAKE_CASE ) )
# Expected mean on dim = -1
a__ = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , __SCREAMING_SNAKE_CASE , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a__ = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , __SCREAMING_SNAKE_CASE , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowercase__ ( self ) -> str:
"""simple docstring"""
a__ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
a__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
a__ = model(torch.tensor(__SCREAMING_SNAKE_CASE ) )
# Expected mean on dim = -1
a__ = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , __SCREAMING_SNAKE_CASE , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a__ = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , __SCREAMING_SNAKE_CASE , atol=1e-5 , rtol=1e-5 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
a__ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
a__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
a__ = model(torch.tensor(__SCREAMING_SNAKE_CASE ) )
a__ = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __SCREAMING_SNAKE_CASE , atol=1e-2 , rtol=1e-2 )
# fmt: off
a__ = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , __SCREAMING_SNAKE_CASE , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
a__ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
a__ = 'Simply put, the theory of relativity states that '
a__ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
a__ = tokenizer.encode(__SCREAMING_SNAKE_CASE , return_tensors='pt' )
a__ = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=__SCREAMING_SNAKE_CASE )
# greedy generation outputs
a__ = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=6_4 , top_p=__SCREAMING_SNAKE_CASE , temperature=1 , do_sample=__SCREAMING_SNAKE_CASE )
a__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 273
| 1
|
UpperCamelCase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 152
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
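# The suite treats the built-in dict as a reference model: every operation
# sequence is replayed against both implementations and all observable state
# must agree. A minimal standalone check (assuming HashMap's dict-like API):
#   hm = HashMap(initial_block_size=4)
#   hm["key_a"] = "val_a"
#   assert hm["key_a"] == "val_a" and len(hm) == 1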
| 152
| 1
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
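# Worked example (relying only on the helpers above): the four non-trivial
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98. Their product
# is 1/100, so multiplying the denominator/numerator ratios together gives
# solution() == 100.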
if __name__ == "__main__":
print(solution())
| 14
|
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
@input.mark(KEYMAP["up"] )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
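# Hedged usage sketch (hypothetical choices; `run` blocks on keyboard input and
# returns the index of the selected entry):
#   menu = BulletMenu("Pick a backend:", ["cpu", "cuda", "mps"])
#   selected_index = menu.run(default_choice=0)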
| 620
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k: str) -> str:
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
__A = ['''START''']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
__A = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
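# Hypothetical invocation (the script filename is an assumption; the flags
# match the argparse definitions above):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#     --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json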
| 61
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
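# Usage sketch (an illustrative assumption about how this lazy init module behaves):
# thanks to `_LazyModule`, importing a name only pulls in the matching framework code.
#
#     from transformers.models.distilbert import DistilBertConfig  # no torch import yet
#     from transformers.models.distilbert import DistilBertModel   # first access imports the torch module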
| 61
| 1
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 102
|
'''simple docstring'''
class _lowercase :
'''simple docstring'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> None:
__lowerCAmelCase = size
__lowerCAmelCase = [0] * size
__lowerCAmelCase = [0] * size
@staticmethod
def a ( SCREAMING_SNAKE_CASE__ : int ) -> int:
return index | (index + 1)
@staticmethod
def a ( SCREAMING_SNAKE_CASE__ : int ) -> int:
return (index & (index + 1)) - 1
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None:
__lowerCAmelCase = value
while index < self.size:
__lowerCAmelCase = self.get_prev(SCREAMING_SNAKE_CASE__ ) + 1
if current_left_border == index:
__lowerCAmelCase = value
else:
__lowerCAmelCase = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = self.get_next(SCREAMING_SNAKE_CASE__ )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> int:
right -= 1 # Because of right is exclusive
__lowerCAmelCase = 0
while left <= right:
__lowerCAmelCase = self.get_prev(SCREAMING_SNAKE_CASE__ )
if left <= current_left:
__lowerCAmelCase = max(SCREAMING_SNAKE_CASE__ , self.tree[right] )
__lowerCAmelCase = current_left
else:
__lowerCAmelCase = max(SCREAMING_SNAKE_CASE__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 427
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
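# Minimal usage sketch (illustrative; assumes the reconstructed field names above):
#
#     template = LanguageModeling(text_column="content")
#     template.column_mapping  # -> {"content": "text"}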
| 695
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
model_config = Wav2Vec2Config()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
config_dict.pop('feature_extractor_type')
config = Wav2Vec2FeatureExtractor(**config_dict)
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695
| 1
|
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
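    # A second worked example (added sketch): g(x) = x**2 - 4 changes sign on [0, 3],
    # so bisection should converge to the root at x = 2 well within the tolerance.
    def g(x: float) -> float:
        return x**2 - 4

    assert abs(bisection(g, 0, 3) - 2.0) < 1e-6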
| 418
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
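# Usage sketch (illustrative; relies on the attribute names reconstructed above):
#
#     config = MraConfig(vocab_size=30522, block_per_row=2)
#     config.approx_mode  # -> "full"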
| 418
| 1
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
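    # One more worked example (added sketch): on a star graph the greedy cover picks
    # only the hub vertex, after which every edge is covered.
    star_graph = {0: [1, 2, 3], 1: [0], 2: [0], 3: [0]}
    assert greedy_min_vertex_cover(star_graph) == {0}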
| 719
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
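# Illustrative alternative (an assumption, not part of the original script): tweepy's
# Cursor helper can replace the manual max_id pagination done above, e.g.
#
#     for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#         ...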
| 591
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence classification and the other tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
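# Usage sketch (hedged: the exact export flow below is an assumption for illustration):
#
#     onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#     dummy_inputs = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=8)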
| 43
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 258
| 0
|
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
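    # Worked examples (added sketch; values match the reconstructed function above):
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-fF") == -11111111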
| 464
|
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
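# Illustrative expected value (added sketch, assuming the fixed signature above):
#
#     hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
#     # -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"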
| 464
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
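# Usage sketch (hedged: assumes PipelineTool wires encode/forward/decode through __call__):
#
#     tool = SpeechToTextTool()
#     transcript = tool("path/to/audio.wav")  # hypothetical audio file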
| 76
|
"""simple docstring"""
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
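# Small usage sketch (added for illustration; names come from the exports above):
#
#     features = Features({"matrix": Array2D(shape=(2, 2), dtype="float32"), "label": Value("int64")})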
| 602
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize backbone config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
f"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
f"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
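# Example invocation (an illustrative sketch; the script filename and paths are hypothetical):
#
#     python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#         --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub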
| 711
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester(unittest.TestCase):
def __init__( self , snake_case , snake_case=7 , snake_case=4_0_0 , snake_case=2_0_0_0 , snake_case=1 , snake_case=0.0 , snake_case=1_6_0_0_0 , snake_case=True , snake_case=True , ) -> int:
'''simple docstring'''
_UpperCAmelCase : int =parent
_UpperCAmelCase : Any =batch_size
_UpperCAmelCase : Tuple =min_seq_length
_UpperCAmelCase : Tuple =max_seq_length
_UpperCAmelCase : Optional[Any] =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : int =feature_size
_UpperCAmelCase : List[str] =padding_value
_UpperCAmelCase : int =sampling_rate
_UpperCAmelCase : List[str] =return_attention_mask
_UpperCAmelCase : Tuple =do_normalize
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase ( self , snake_case=False , snake_case=False) -> Any:
'''simple docstring'''
def _flatten(snake_case):
return list(itertools.chain(*snake_case))
if equal_length:
_UpperCAmelCase : List[Any] =floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
_UpperCAmelCase : Optional[Any] =[
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_UpperCAmelCase : Optional[int] =[np.asarray(snake_case) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = ASTFeatureExtractor
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple =ASTFeatureExtractionTester(self)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase : str =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : str =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
_UpperCAmelCase : Optional[int] =[np.asarray(snake_case) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase : List[str] =feat_extract(speech_inputs[0] , return_tensors='np').input_values
_UpperCAmelCase : List[Any] =feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
# Test batched
_UpperCAmelCase : Tuple =feat_extract(snake_case , padding=snake_case , return_tensors='np').input_values
_UpperCAmelCase : List[Any] =feat_extract(snake_case , padding=snake_case , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : Dict =[floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_UpperCAmelCase : Tuple =np.asarray(snake_case)
_UpperCAmelCase : Optional[Any] =feat_extract(snake_case , return_tensors='np').input_values
_UpperCAmelCase : Dict =feat_extract(snake_case , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case , snake_case):
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-3))
@require_torch
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
import torch
_UpperCAmelCase : Any =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_speech_inputs = np.random.rand(100).astype(np.float32)
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : Optional[Any] =feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.float32)
_UpperCAmelCase : List[str] =feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.float32)
def _load_datasamples(self, num_samples):
    from datasets import load_dataset

    ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    # automatic decoding with librispeech
    speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
    return [x["array"] for x in speech_samples]
@require_torch
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
# fmt: off
EXPECTED_INPUT_VALUES = torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69])
# fmt: on
input_speech = self._load_datasamples(1)
feature_extractor = ASTFeatureExtractor()
input_values = feature_extractor(input_speech, return_tensors="pt").input_values
self.assertEqual(input_values.shape, (1, 1024, 128))
self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 331
| 0
|
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """
    Returns the number of k-length combinations that can be drawn from n values, where n >= k.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
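    # One extra check (added sketch): choosing 2 of 5 items gives the familiar 10.
    assert combinations(5, 2) == 10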
| 138
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    """
    Returns the number of distinct terms a**b for 2 <= a <= n and 2 <= b <= n.
    """
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
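    # Quick sanity check (added sketch): for n = 5 there are 15 distinct values of a**b.
    assert solution(5) == 15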
| 138
| 1
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = StableUnCLIPPipeline
lowercase = TEXT_TO_IMAGE_PARAMS
lowercase = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowercase = False
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : int = 32
UpperCAmelCase : int = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
UpperCAmelCase : Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A , projection_dim=A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase : int = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=A , num_layers=1 , )
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=A , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase : Any = StableUnCLIPImageNormalizer(embedding_dim=A )
UpperCAmelCase : Any = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
UpperCAmelCase : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
unet = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A , layers_per_block=1 , upcast_attention=A , use_linear_projection=A , )
torch.manual_seed(0 )
UpperCAmelCase : Any = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=A , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = AutoencoderKL()
UpperCAmelCase : str = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def _lowercase( self , A , A=0 ) -> Optional[int]:
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Union[str, Any] = torch.manual_seed(A )
else:
UpperCAmelCase : Dict = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self ) -> Dict:
UpperCAmelCase : List[str] = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : List[str] = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=A )
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
UpperCAmelCase : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase : Union[str, Any] = pipe("""anime turle""" , generator=A , output_type="""np""" )
UpperCAmelCase : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A , A )
def _lowercase( self ) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase : Any = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
UpperCAmelCase : Any = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase : Dict = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
UpperCAmelCase : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
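# Usage sketch (added for illustration; `get_dummy_components` and
# `get_dummy_inputs` are the helpers defined above, provided via `self` by
# the enclosing fast-test class, which lives above this excerpt):
#
#     components = self.get_dummy_components()
#     pipe = StableUnCLIPPipeline(**components)
#     image = pipe(**self.get_dummy_inputs("cpu")).images[0]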
"""
Project Euler Problem 92: https://projecteuler.net/problem=92

Every starting number eventually arrives at 1 or 89 when its digits are
repeatedly squared and summed. This solution counts how many starting
numbers below ten million arrive at 89.
"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, and the chain member 58 is the one which, when declared first,
# gives the least number of iterations for all the members to be checked;
# the other ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89


def chain(number: int) -> bool:
    """Return True if `number`'s chain ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # Every multiple of 10 has the same digit-square sum, so mark those too.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
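# Hand-checked example (added for illustration, not part of the original
# solution): 44 -> 32 -> 13 -> 10 -> 1, while 85 jumps straight to 89.
assert next_number(44) == 4**2 + 4**2 == 32
assert next_number(85) == 8**2 + 5**2 == 89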
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after"
                " special tokens will not be properly handled. We recommend you to read the related pull request"
                " available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
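# Minimal usage sketch (added for illustration; assumes the public `t5-small`
# checkpoint and an installed `sentencepiece` package):
#
#     tokenizer = T5Tokenizer.from_pretrained("t5-small")
#     ids = tokenizer("translate English to German: hello").input_ids
#     tokenizer.get_sentinel_tokens()  # ["<extra_id_0>", ..., "<extra_id_99>"]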
"""simple docstring"""
from __future__ import annotations
UpperCamelCase_ : Union[str, Any] = '''#'''
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any ) -> None:
"""simple docstring"""
A_ = {}
def lowerCamelCase__ ( self : int , _snake_case : str ) -> None:
"""simple docstring"""
A_ = self._trie
for char in text:
if char not in trie:
A_ = {}
A_ = trie[char]
A_ = True
def lowerCamelCase__ ( self : int , _snake_case : str ) -> tuple | list:
"""simple docstring"""
A_ = self._trie
for char in prefix:
if char in trie:
A_ = trie[char]
else:
return []
return self._elements(_snake_case )
def lowerCamelCase__ ( self : Optional[int] , _snake_case : dict ) -> tuple:
"""simple docstring"""
A_ = []
for c, v in d.items():
A_ = [" "] if c == END else [(c + s) for s in self._elements(_snake_case )]
result.extend(_snake_case )
return tuple(_snake_case )
UpperCamelCase_ : Any = Trie()
UpperCamelCase_ : List[str] = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
trie.insert_word(word)
def A_ (__a ):
'''simple docstring'''
A_ = trie.find_word(__a )
return tuple(string + word for word in suffixes )
def A_ ():
'''simple docstring'''
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy stand-in so the type hints below don't fail when vision is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    """Shorten an image to a 10-character MD5 digest for compact comparisons."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_outputs, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_9_6_7},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_9_3},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_9_0_9},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_8_7_9},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_8_3_4},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_7_1_6},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_6_1_2},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_5_9_9},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_5_5_2},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_5_3_2},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_5_1_6},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_4_9_9},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_4_8_3},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_4_6_4},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_4_0_8},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_3_3_5},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_3_2_6},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_2_6_2},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_9_9_9},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_9_8_6},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_9_8_4},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_8_7_3},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_8_7_1}
] , )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs["masks"]):
            new_outputs += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_outputs, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1_0},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
] , )
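    # Note (added for illustration): `hashimage` above collapses each predicted
    # mask to a 10-character MD5 prefix, so the expected lists stay compact
    # while still pinning the exact mask bytes.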
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""
Project Euler Problem 145: https://projecteuler.net/problem=145

A number n is "reversible" if n + reverse(n) consists entirely of odd
digits. Count the reversible numbers below 10**max_power.
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of the given length, filling digit pairs from
    the outside in and carrying `remainder` between pair sums."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
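# Hand-checked example (added for illustration): 36 + 63 = 99, whose digits
# are all odd, so 36 is reversible; Project Euler 145 states there are 120
# reversible numbers below one thousand, which solution(3) reproduces.
assert solution(3) == 120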
if __name__ == "__main__":
print(F'''{solution() = }''')
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode raw audio bytes into a mono float32 waveform using the ffmpeg CLI."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio through ffmpeg, yielding raw byte chunks."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Like `ffmpeg_microphone`, but yields dicts holding numpy audio plus stride metadata."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Read raw bytes from `iterator` and cut them into overlapping chunks of `chunk_len` bytes."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper: spawn ffmpeg and yield `buflen`-sized byte buffers from its stdout."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
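# Minimal usage sketch (added for illustration; "sample.wav" is a placeholder
# path, and a working `ffmpeg` binary on PATH is assumed):
#
#     with open("sample.wav", "rb") as f:
#         waveform = ffmpeg_read(f.read(), sampling_rate=16_000)
#     # waveform is a mono float32 numpy array sampled at 16 kHz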
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        self.assertEqual(out_len + 2, len(outputs))

        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
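# Usage sketch (added for illustration; mirrors test_seq_to_seq_generation
# above — the `...` placeholders stand for the tensors in `batch`):
#
#     model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#     outputs = model.generate(past_values=..., past_time_features=..., past_observed_mask=...,
#                              static_categorical_features=..., future_time_features=...)
#     point_forecast = outputs.sequences.mean(dim=1)  # average over sampled trajectories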
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    """
    Benchmark arguments shared by the framework-specific benchmark classes.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
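# Minimal usage sketch (added for illustration; instantiation emits the
# deprecation warning defined in __post_init__):
#
#     args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[32])
#     print(args.to_json_string())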
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
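
# A minimal usage sketch (added for illustration; it assumes a FLAVA checkpoint
# such as "facebook/flava-full" and a PIL image object named `image`, neither
# of which is verified here):
#
#   from transformers import FlavaProcessor
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")
#   # `inputs` now carries both tokenizer outputs (`input_ids`, ...) and image
#   # processor outputs (`pixel_values`, ...), merged by the
#   # `encoding.update(...)` branch in `__call__` above.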
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
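
# Illustration (added; not part of the original script): applying the mapping
# above inside `_load_model` turns a suno/bark checkpoint key such as
#   "transformer.h.0.attn.c_attn.weight"
# into the HF-style key
#   "layers.0.attn.att_proj.weight"
# via plain substring replacement, one dictionary entry at a time.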
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semantic_config = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarse_config = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fine_config = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codec_config = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarse_acoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fine_acoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(semantic_config, coarse_config, fine_config, codec_config)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarse_acoustic.generation_config, fine_acoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarse_acoustic
    bark.fine_acoustics = fine_acoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
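
# For reference (an assumption about the News API payload, not verified here),
# the JSON is expected to look roughly like
#   {"articles": [{"title": "Headline one", ...}, {"title": "Headline two", ...}]}
# so the loop above prints a numbered list of headlines.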
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
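
# A short usage sketch (added; mirrors the class above):
#
#   hm = HashMap(initial_block_size=4)
#   hm["key_a"] = 1          # __setitem__ -> _add_item, probing via _iterate_buckets
#   hm["key_b"] = 2
#   del hm["key_a"]          # slot is replaced by the _deleted sentinel
#   assert "key_a" not in hm and hm["key_b"] == 2
#
# Deleted slots keep probe chains intact: lookups skip the sentinel instead of
# stopping at the first empty bucket, the standard open-addressing trick.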
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> "list[int]":
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
SCREAMING_SNAKE_CASE = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE = 1
if upper_limit > 0:
SCREAMING_SNAKE_CASE = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(SCREAMING_SNAKE_CASE_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
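
# Example (added for illustration): the first few Catalan numbers.
#
#   >>> catalan_numbers(5)
#   [1, 1, 2, 5, 14, 42]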
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    # method name reconstructed from the skip message used elsewhere in the test suite
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
"""simple docstring"""
import os
def __UpperCAmelCase ( snake_case_ : str = "input.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) as input_file:
_lowerCAmelCase = [
[int(snake_case_ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
_lowerCAmelCase = len(snake_case_ )
_lowerCAmelCase = len(matrix[0] )
_lowerCAmelCase = [[-1 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
for i in range(snake_case_ ):
_lowerCAmelCase = matrix[i][0]
for j in range(1 , snake_case_ ):
for i in range(snake_case_ ):
_lowerCAmelCase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , snake_case_ ):
_lowerCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_lowerCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
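
# Worked example (added): for the classic 5x5 grid from Project Euler 82,
#   131 673 234 103  18
#   201  96 342 965 150
#   630 803 746 422 111
#   537 699 497 121 956
#   805 732 524  37 331
# the minimal left-column-to-right-column path moving up, down, and right is
# 201 -> 96 -> 342 -> 234 -> 103 -> 18 = 994, which is what `solution` returns
# for an input.txt containing those comma-separated rows.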
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
def lowercase ( UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
return 1 if input_a == input_a else 0
def lowercase ( ):
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
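
# Note (added): with this pattern `import transformers.models.rembert` stays
# cheap; a name such as `RemBertModel` is only resolved (and torch only
# imported) when the attribute is first accessed, because the module object
# registered in sys.modules is the lazy proxy built above.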
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class a ( a__ ):
snake_case__ = '''SpeechT5FeatureExtractor'''
snake_case__ = '''SpeechT5Tokenizer'''
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case )
def __call__( self , *_snake_case , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = kwargs.pop('audio' , _snake_case )
lowerCAmelCase = kwargs.pop('text' , _snake_case )
lowerCAmelCase = kwargs.pop('text_target' , _snake_case )
lowerCAmelCase = kwargs.pop('audio_target' , _snake_case )
lowerCAmelCase = kwargs.pop('sampling_rate' , _snake_case )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
lowerCAmelCase = self.feature_extractor(_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case )
elif text is not None:
lowerCAmelCase = self.tokenizer(_snake_case , **_snake_case )
else:
lowerCAmelCase = None
if audio_target is not None:
lowerCAmelCase = self.feature_extractor(audio_target=_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case )
lowerCAmelCase = targets['input_values']
elif text_target is not None:
lowerCAmelCase = self.tokenizer(_snake_case , **_snake_case )
lowerCAmelCase = targets['input_ids']
else:
lowerCAmelCase = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase = labels
lowerCAmelCase = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase = decoder_attention_mask
return inputs
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = kwargs.pop('input_values' , _snake_case )
lowerCAmelCase = kwargs.pop('input_ids' , _snake_case )
lowerCAmelCase = kwargs.pop('labels' , _snake_case )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
lowerCAmelCase = self.feature_extractor.pad(_snake_case , *_snake_case , **_snake_case )
elif input_ids is not None:
lowerCAmelCase = self.tokenizer.pad(_snake_case , **_snake_case )
else:
lowerCAmelCase = None
if labels is not None:
if "input_ids" in labels or (isinstance(_snake_case , _snake_case ) and "input_ids" in labels[0]):
lowerCAmelCase = self.tokenizer.pad(_snake_case , **_snake_case )
lowerCAmelCase = targets['input_ids']
else:
lowerCAmelCase = self.feature_extractor.feature_size
lowerCAmelCase = self.feature_extractor.num_mel_bins
lowerCAmelCase = self.feature_extractor.pad(_snake_case , *_snake_case , **_snake_case )
lowerCAmelCase = feature_size_hack
lowerCAmelCase = targets['input_values']
else:
lowerCAmelCase = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase = labels
lowerCAmelCase = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase = decoder_attention_mask
return inputs
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
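
# A minimal usage sketch (added; "microsoft/speecht5_tts" is assumed here for
# illustration and not verified):
#
#   from transformers import SpeechT5Processor
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")
#   # For TTS training one would also pass audio_target=... so that __call__
#   # attaches spectrogram `labels` and a `decoder_attention_mask` as above.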
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)

# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: maximum value using items from `index` onward."""
    if index == number_of_items:
        return 0
    # either skip the current item...
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # ...or take it, if it fits in the remaining weight budget
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
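
# Example (added for illustration): with weights [1, 2, 3], values [6, 10, 12]
# and a weight budget of 5, the best choice is the last two items (10 + 12):
#
#   >>> knapsack([1, 2, 3], [6, 10, 12], number_of_items=3, max_weight=5, index=0)
#   22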
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
def __init__( self , __a , __a=None , __a="base" , __a=None , __a=None , __a=None , **__a , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__a )
__a : int = 0
__a : Union[str, Any] = Path(self.hparams.output_dir )
__a : Tuple = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__a : Tuple = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__a , **__a , )
else:
__a : PretrainedConfig = config
__a : Union[str, Any] = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __a , __a ):
assert hasattr(self.config , __a ), f"""model config doesn't have a `{p}` attribute"""
setattr(self.config , __a , getattr(self.hparams , __a ) )
if tokenizer is None:
__a : Dict = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__a , )
else:
__a : PreTrainedTokenizer = tokenizer
__a : str = MODEL_MODES[mode]
if model is None:
__a : Optional[int] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__a , )
else:
__a : Optional[int] = model
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
__a : int = self.model_type.from_pretrained(*__a , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = arg_to_scheduler[self.hparams.lr_scheduler]
__a : int = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__a : Any = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.model
__a : Dict = ['bias', 'LayerNorm.weight']
__a : List[str] = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__a : List[Any] = Adafactor(
__a , lr=self.hparams.learning_rate , scale_parameter=__a , relative_step=__a )
else:
__a : Dict = AdamW(
__a , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__a : Dict = optimizer
__a : Any = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
return self.validation_step(__a , __a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return self.validation_end(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__a : int = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if stage == "test":
__a : Optional[Any] = len(self.test_dataloader().dataset )
else:
__a : Any = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__a )
__a : Optional[Any] = len(self.train_dataloader().dataset )
def __UpperCAmelCase ( self , __a , __a , __a = False ):
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.train_loader
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__a , list(filter(__a , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = self.output_dir.joinpath('best_tfmr' )
__a : Union[str, Any] = self.step_count
self.model.save_pretrained(__a )
self.tokenizer.save_pretrained(__a )
@staticmethod
def __UpperCAmelCase ( __a , __a ):
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=__a , type=__a , required=__a , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__a , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__a , type=__a , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__a ).parent / 'test_run' / 'cache' ) , type=__a , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__a , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__a , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__a , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__a , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=__a , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__a , metavar=__a , type=__a , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__a , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=__a , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__a , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__a , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__a )
parser.add_argument('--train_batch_size' , default=32 , type=__a )
parser.add_argument('--eval_batch_size' , default=32 , type=__a )
parser.add_argument('--adafactor' , action='store_true' )
class InitCallback(pl.Callback):
    # hook name reconstructed; the source only shows the (trainer, pl_module) signature
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    # hook name reconstructed; prints any RAG parameter that received no gradient
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train (model : BaseTransformer , args : argparse.Namespace , early_stopping_callback : Optional[Any]=None , logger : Any=True , extra_callbacks : List[str]=[] , checkpoint_callback : Optional[Any]=None , logging_callback : Optional[Any]=None , **extra_train_kwargs : List[str] , ):
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['precision'] = 16
    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'
    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
else:
        print('RAG modeling tests with new set functions successfully executed!' )
return trainer
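# Illustrative wiring of the helpers above (a sketch; `MyRagModule` is a
# hypothetical stand-in for the project's BaseTransformer subclass):
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyRagModule(args)
#   trainer = generic_train(model, args)  # wires precision, DDP and the callbacks above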
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Optional[Any] = logging.get_logger(__name__)
def get_dpt_config (checkpoint_url : str ):
    config = DPTConfig(embedding_type='hybrid' )
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_ (state_dict : dict ):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key (name : str ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__a : Optional[Any] = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
__a : Optional[Any] = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
__a : Union[str, Any] = name.replace('patch_embed' , '' )
if "pos_embed" in name:
__a : int = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
__a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
__a : int = name.replace('proj' , 'projection' )
if "blocks" in name:
__a : Optional[int] = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
__a : str = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__a : str = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
__a : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
__a : Tuple = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
__a : int = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
__a : int = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
__a : str = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
__a : Dict = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
__a : Any = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
__a : List[str] = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
__a : str = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__a : Optional[Any] = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
__a : Optional[Any] = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
__a : List[Any] = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
__a : int = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
__a : List[str] = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
__a : Dict = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__a : Union[str, Any] = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
__a : Dict = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
__a : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
__a : Any = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__a : Any = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
__a : Any = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
__a : Dict = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
__a : Tuple = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
__a : Optional[int] = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
__a : Tuple = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
__a : str = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
__a : Dict = name.replace('pretrained' , 'dpt' )
if "bn" in name:
__a : int = name.replace('bn' , 'batch_norm' )
if "head" in name:
__a : Any = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
__a : Union[str, Any] = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
__a : List[str] = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
__a : List[str] = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
__a : Union[str, Any] = name.replace('..' , '.' )
if "stem.conv" in name:
__a : Tuple = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
__a : Tuple = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
__a : str = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
__a : Union[str, Any] = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
__a : List[str] = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
__a : Tuple = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
__a : Union[str, Any] = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
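# Worked example of the renaming above (illustrative, computed by hand):
#   rename_key("pretrained.model.blocks.0.attn.proj.weight")
#   -> "dpt.encoder.layer.0.attention.output.dense.weight"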
def read_in_q_k_v (state_dict : dict , config : DPTConfig ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
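# For the "large" config above (hidden_size=1024), each fused qkv weight has shape
# (3 * 1024, 1024); the three slices carve it into query/key/value matrices of
# shape (1024, 1024) each, in that order.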
def prepare_img ():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint (checkpoint_url : str , pytorch_dump_folder_path : str , push_to_hub : bool , model_name : str , show_prediction : bool ):
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
model.eval()
# Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt' )
    # forward pass
    outputs = model(**encoding ).logits if 'ade' in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
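# Example invocation (illustrative; --checkpoint_url must be a local file here,
# since the script passes it straight to torch.load):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url /path/to/dpt_hybrid-midas.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid --show_prediction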
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
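# With the registration above, `from transformers.models.encodec import EncodecModel`
# defers the heavy torch import until first attribute access. A rough sketch of the
# mechanism (simplified; the real _LazyModule lives in transformers.utils):
#   class _LazyModule(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._class_to_module[name]              # e.g. "modeling_encodec"
#           module = importlib.import_module("." + submodule, self.__name__)
#           return getattr(module, name)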
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __snake_case ( PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self : int , vocab_size : int=5_0_2_6_5 , max_position_embeddings : int=5_1_2 , encoder_layers : int=8 , encoder_ffn_dim : int=2_0_4_8 , encoder_attention_heads : int=1_6 , decoder_layers : int=8 , decoder_ffn_dim : int=2_0_4_8 , decoder_attention_heads : int=1_6 , encoder_layerdrop : float=0.0 , decoder_layerdrop : float=0.0 , use_cache : bool=True , is_encoder_decoder : bool=True , activation_function : str="gelu" , d_model : int=5_1_2 , dropout : float=0.1 , attention_dropout : float=0.0 , activation_dropout : float=0.0 , init_std : float=0.02 , decoder_start_token_id : int=1 , scale_embedding : bool=False , pad_token_id : int=0 , bos_token_id : int=1 , eos_token_id : int=2 , forced_eos_token_id : int=2 , **kwargs : Optional[int] , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
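# Quick sanity sketch for the defaults above (the class is BlenderbotSmallConfig
# upstream; `num_attention_heads` resolves through attribute_map):
#   config = BlenderbotSmallConfig()
#   assert config.d_model == 512 and config.encoder_layers == 8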
class __snake_case ( OnnxSeq2SeqConfigWithPast):
@property
    def inputs( self : Optional[int] ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f'''past_key_values.{i}.key'''] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[f'''past_key_values.{i}.value'''] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
@property
    def outputs( self : Union[str, Any] ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f'''present.{i}.key'''] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[f'''present.{i}.value'''] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self : Any , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch , encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self : Optional[Any] , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch , seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self : Optional[Any] , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self : int , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self : int , flattened_output : Optional[int] , name : str , idx : Tuple , t : Any ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
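# Sketch of how an ONNX config like the one above is consumed (the export entry
# point follows the transformers.onnx API of the same era; treat the exact
# signature as an assumption):
#   from pathlib import Path
#   from transformers.onnx import export
#   onnx_config = <onnx config class above>(model.config, task="seq2seq-lm")
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))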
def solution ( numerator : int = 1 , digit : int = 1000 ) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
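# Example: solution(1, 10) returns 7, since 1/7 = 0.(142857) has the longest
# recurring cycle (6 digits) among denominators below 10; the full d < 1000
# search yields the well-known Project Euler answer, 983.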
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments ( path : str , n_shave_prefix_segments : int=1 ) -> str:
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def renew_resnet_paths ( old_list : list , n_shave_prefix_segments : int=0 ) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0" , "norm1" )
        new_item = new_item.replace("in_layers.2" , "conv1" )
        new_item = new_item.replace("out_layers.0" , "norm2" )
        new_item = new_item.replace("out_layers.3" , "conv2" )
        new_item = new_item.replace("emb_layers.1" , "time_emb_proj" )
        new_item = new_item.replace("skip_connection" , "conv_shortcut" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def renew_attention_paths ( old_list : list , n_shave_prefix_segments : int=0 ) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight" , "group_norm.weight" )
        new_item = new_item.replace("norm.bias" , "group_norm.bias" )
        new_item = new_item.replace("proj_out.weight" , "proj_attn.weight" )
        new_item = new_item.replace("proj_out.bias" , "proj_attn.bias" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def assign_to_checkpoint ( paths : list , checkpoint : dict , old_checkpoint : dict , attention_paths_to_split : dict=None , additional_replacements : list=None , config : dict=None ) -> None:
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["query"]] = query.reshape(target_shape )
            checkpoint[path_map["key"]] = key.reshape(target_shape )
            checkpoint[path_map["value"]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
        new_path = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
        new_path = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"] , replacement["new"] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint ( checkpoint : dict , config : dict ) -> dict:
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)
        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split("." )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i ), path["old"]] )
                new_path = ".".join(["up_blocks", str(block_id ), "resnets", str(layer_in_block_id ), path["new"]] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the architecture.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
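# Example invocation (illustrative paths; the script file name is an assumption):
#   python convert_ldm_checkpoint.py --checkpoint_path ldm/model.ckpt \
#       --config_file ldm/config.json --dump_path ./ldm-converted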
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story ( story_id : str ) -> dict:
    '''simple docstring'''
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url ).json()
def hackernews_top_stories ( max_stories : int = 10 ) -> list[dict]:
    '''simple docstring'''
    url = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown ( max_stories : int = 10 ) -> str:
    '''simple docstring'''
    stories = hackernews_top_stories(max_stories )
    return "\n".join("""* [{title}]({url})""".format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
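# A story item returned by the item endpoint above looks roughly like this
# (abridged; the canonical example item from the Hacker News API docs):
#   {"by": "dhouston", "id": 8863, "score": 111, "type": "story",
#    "title": "My YC app: Dropbox - Throw away your USB drive",
#    "url": "http://www.getdropbox.com/u/2/screencast.html"}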
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('''fixtures''')
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down( self : Optional[Any] ) -> Union[str, Any]:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self : Union[str, Any] ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
    def test_image_processor_from_pretrained_subfolder( self : int ) -> Union[str, Any]:
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
        config = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
        self.assertIsNotNone(config )
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass( cls : Optional[int] ) -> Tuple:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls : Tuple ) -> int:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
    def test_push_to_hub( self : Optional[Any] ) -> Tuple:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-image-processor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="""test-image-processor""" , push_to_hub=True , use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_in_organization( self : Optional[int] ) -> List[Any]:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=True , use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_dynamic_image_processor( self : Dict ) -> int:
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f"""{USER}/test-dynamic-image-processor""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def A__ ( lowercase: str = "" ) -> dict[str, float]:
A : str =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
A : Tuple =BeautifulSoup(requests.get(lowercase ).text, 'html.parser' )
A : Optional[int] =soup.find_all('td', attrs='titleColumn' )
A : Union[str, Any] =soup.find_all('td', class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowercase, lowercase )
}
def A__ ( lowercase: str = "IMDb_Top_250_Movies.csv" ) -> None:
A : str =get_imdb_top_aaa_movies()
with open(lowercase, 'w', newline='' ) as out_file:
A : int =csv.writer(lowercase )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
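# Note: the scraper depends on IMDb's chart markup ('titleColumn' /
# 'ratingColumn imdbRating' cells); if IMDb changes that layout, the
# find_all selectors above must be updated.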
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key ( ciphertext: list[int], key: tuple[int, ...] ) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ), ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars ( ciphertext: list[int] ) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3 ):
        encoded = try_key(ciphertext, key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word ( possibles: list[str], common_word: str ) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def A__ ( lowercase: str = "p059_cipher.txt" ) -> int:
A : list[int]
A : list[str]
A : str
A : str
A : str =Path(lowercase ).parent.joinpath(lowercase ).read_text(encoding='utf-8' )
A : Tuple =[int(lowercase ) for number in data.strip().split(',' )]
A : Dict =filter_valid_chars(lowercase )
for common_word in COMMON_WORDS:
A : List[Any] =filter_common_word(lowercase, lowercase )
if len(lowercase ) == 1:
break
A : str =possibles[0]
return sum(ord(lowercase ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
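# The attack works because XOR is self-inverse: (b ^ k) ^ k == b for any byte b
# and key byte k. A tiny round-trip check (illustrative):
#   key = (ord("a"), ord("b"), ord("c"))
#   cipher = [c ^ k for c, k in zip(map(ord, "hi!"), cycle(key))]
#   assert try_key(cipher, key) == "hi!"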
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _lowerCAmelCase ( ProcessorMixin ):
    """simple docstring"""
    attributes = ["""image_processor"""]
    image_processor_class = """SamImageProcessor"""
    def __init__( self : List[str] , image_processor : List[Any] )-> Any:
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["""longest_edge"""]
    def __call__( self : str , images : str=None , input_points : Tuple=None , input_labels : List[Any]=None , input_boxes : List[str]=None , return_tensors : Optional[Any] = None , **kwargs : Dict , )-> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , **kwargs , )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["""original_sizes"""]
        if hasattr(original_sizes , """numpy""" ):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points , input_labels , input_boxes = self._check_and_preprocess_points(
            input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
        return encoding_image_processor
    def _normalize_and_convert( self : Union[str, Any] , encoding_image_processor : Any , original_sizes : int , input_points : Any=None , input_labels : Any=None , input_boxes : Dict=None , return_tensors : Tuple="pt" , )-> Tuple:
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_size )
                    for point, original_size in zip(input_points , original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points , input_labels = self._pad_points_and_labels(input_points , input_labels )
            input_points = np.array(input_points )
        if input_labels is not None:
            input_labels = np.array(input_labels )
        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_sizes[0] , is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_size , is_bounding_box=True )
                    for box, original_size in zip(input_boxes , original_sizes )
                ]
            input_boxes = np.array(input_boxes )
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"""input_boxes""": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"""input_points""": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"""input_labels""": input_labels} )
        return encoding_image_processor
    def _pad_points_and_labels( self : int , input_points : Any , input_labels : str )-> List[str]:
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates( self : Tuple , target_size : int , coords : np.ndarray , original_size : tuple , is_bounding_box : bool=False )-> np.ndarray:
        old_h , old_w = original_size
        new_h , new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1 , 2 , 2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1 , 4 )
        return coords
    def _check_and_preprocess_points( self : List[Any] , input_points : List[Any]=None , input_labels : str=None , input_boxes : List[Any]=None , )-> Union[str, Any]:
        if input_points is not None:
            if hasattr(input_points , """numpy""" ):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
                raise ValueError("""Input points must be a list of list of floating points.""" )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels , """numpy""" ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
                raise ValueError("""Input labels must be a list of list integers.""" )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes , """numpy""" ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes , list )
                or not isinstance(input_boxes[0] , list )
                or not isinstance(input_boxes[0][0] , list )
            ):
                raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
@property
    def model_input_names( self : Optional[Any] )-> Optional[int]:
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def post_process_masks( self : Union[str, Any] , *args : Any , **kwargs : Tuple )-> Dict:
        return self.image_processor.post_process_masks(*args , **kwargs )
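# Typical use of the processor above (a sketch; the checkpoint id follows the
# public SAM releases on the Hub):
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="pt")
#   # -> pixel_values plus resized, normalized input_points ready for SamModel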
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self : List[str] )-> Optional[int]:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def test_tpu( self : int )-> List[str]:
        distributed_args = f'''
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase (DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self , a = 1 , a = None , a = 0.0 , a = 50 , a = None , a = "pil" , a = True , ):
"""simple docstring"""
if isinstance(self.unet.config.sample_size , a ):
snake_case_ :List[str] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
snake_case_ :List[Any] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(a )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
snake_case_ :List[Any] = randn_tensor(a , generator=a , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case_ :List[Any] = self.unet(a , a ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case_ :Optional[int] = self.scheduler.step(
a , a , a , eta=a , use_clipped_model_output=a , generator=a ).prev_sample
snake_case_ :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ :List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ :List[Any] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
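# A runnable stand-in for the sampling loop above: the noise predictor and the
# scheduler step are replaced by toy callables so the control flow can be
# exercised without model weights (a sketch, not the diffusers API):
import torch

def ddim_like_loop(shape=(1, 3, 8, 8), steps=5):
    sample = torch.randn(shape)
    for _ in range(steps):
        noise_pred = torch.zeros_like(sample)  # stands in for unet(sample, t).sample
        sample = sample - 0.1 * noise_pred     # stands in for scheduler.step(...).prev_sample
    return (sample / 2 + 0.5).clamp(0, 1)      # same final rescale as the pipeline

print(ddim_like_loop().shape)  # torch.Size([1, 3, 8, 8])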
| 584
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__UpperCAmelCase : Dict = None
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Any = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase : Tuple = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase : int = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
__UpperCAmelCase : int = '▁'
class __lowerCAmelCase (__UpperCamelCase ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = AlbertTokenizer
def __init__( self , a=None , a=None , a=True , a=True , a=False , a="[CLS]" , a="[SEP]" , a="<unk>" , a="[SEP]" , a="<pad>" , a="[CLS]" , a="[MASK]" , **a , ):
"""simple docstring"""
snake_case_ :Tuple = (
AddedToken(a , lstrip=a , rstrip=a , normalized=a )
if isinstance(a , a )
else mask_token
)
super().__init__(
a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
snake_case_ :Tuple = do_lower_case
snake_case_ :str = remove_space
snake_case_ :List[str] = keep_accents
snake_case_ :Union[str, Any] = vocab_file
snake_case_ :Any = False if not self.vocab_file else True
def _a ( self , a , a = None ):
"""simple docstring"""
snake_case_ :List[str] = [self.sep_token_id]
snake_case_ :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self , a , a = None ):
"""simple docstring"""
snake_case_ :Union[str, Any] = [self.sep_token_id]
snake_case_ :str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self , a , a = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ :Dict = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
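# The two pair-building helpers above implement the standard single/pair
# layouts `[CLS] A [SEP]` and `[CLS] A [SEP] B [SEP]`. A self-contained
# illustration with made-up ids (cls=2 and sep=3 are assumptions):
def build_inputs(token_ids_0, token_ids_1=None, cls_id=2, sep_id=3):
    if token_ids_1 is None:
        return [cls_id] + token_ids_0 + [sep_id]
    return [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]

print(build_inputs([10, 11]))        # [2, 10, 11, 3]
print(build_inputs([10], [20, 21]))  # [2, 10, 3, 20, 21, 3]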
| 584
| 1
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
UpperCAmelCase_ = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
UpperCAmelCase_ = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
UpperCAmelCase_ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def UpperCAmelCase ( A__ ) -> dict[str, int]:
_snake_case : Tuple = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def UpperCAmelCase ( A__ ) -> str:
    return A__[0]
def UpperCAmelCase ( A__ ) -> str:
_snake_case : Dict = get_letter_count(A__ )
_snake_case : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
_snake_case : dict[int, str] = {}
for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
_snake_case : Optional[int] = """""".join(freq_to_letter[freq] )
_snake_case : str = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=A__ , reverse=True )
_snake_case : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(A__ )
def UpperCAmelCase ( A__ ) -> int:
_snake_case : List[Any] = get_frequency_order(A__ )
_snake_case : Optional[Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
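# A compact, self-contained sketch of the ETAOIN match heuristic above
# (simplified: ties between equally frequent letters are not re-ordered by
# ETAOIN rank, which the full version does):
from collections import Counter

ETAOIN_ORDER = "ETAOINSHRDLCUMWFGYPBVKJXQZ"

def freq_match_score(message):
    counts = Counter(c for c in message.upper() if c.isalpha())
    order = sorted(ETAOIN_ORDER, key=lambda ch: counts[ch], reverse=True)
    top, bottom = set(order[:6]), set(order[-6:])
    return sum(ch in top for ch in ETAOIN_ORDER[:6]) + sum(
        ch in bottom for ch in ETAOIN_ORDER[-6:]
    )

print(freq_match_score("The quick brown fox jumps over the lazy dog"))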
| 721
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCAmelCase ( A__ , A__ ) -> Optional[Any]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_snake_case : int = flax_key_tuple[:-1] + ("""weight""",)
_snake_case : Tuple = torch.permute(A__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(A__ ):
# linear layer
_snake_case : List[Any] = flax_key_tuple[:-1] + ("""weight""",)
_snake_case : List[str] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_snake_case : Dict = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def UpperCAmelCase ( A__ , A__ , A__ ) -> Optional[int]:
if "metadata" in layer:
_snake_case : Optional[int] = layer.split("""metadata""" )
_snake_case : Tuple = """""".join(split_layer[0] )[:-1]
_snake_case : Any = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
_snake_case : List[Any] = layer.split("""kvstore""" )
_snake_case : List[Any] = """""".join(split_layer[0] )[:-1]
_snake_case : Tuple = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
_snake_case : Any = layer.split("""/""" )
_snake_case : int = """/""".join(split_layer[:-1] )
_snake_case : Tuple = (split_layer[-1],)
if "kvstore/path" in layer:
_snake_case : int = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
_snake_case : List[Any] = """file"""
else:
_snake_case : int = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def UpperCAmelCase ( A__ , A__ ) -> Dict:
_snake_case : Union[str, Any] = rename_keys(A__ )
_snake_case : Tuple = {}
for k, v in current_block.items():
_snake_case : str = v
_snake_case : Union[str, Any] = new_current_block
torch.save(A__ , A__ )
def UpperCAmelCase ( A__ , A__ , A__ , A__ , A__ = WEIGHTS_NAME ) -> Dict:
_snake_case : int = convert_file_size_to_int(A__ )
_snake_case : List[str] = []
_snake_case : Optional[Any] = {}
_snake_case : Optional[int] = 0
_snake_case : List[Any] = 0
os.makedirs(A__ , exist_ok=A__ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
_snake_case : Optional[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
_snake_case : List[str] = flatten_dict(A__ , sep="""/""" )
_snake_case : Dict = {}
for layer in checkpoint_info.keys():
_snake_case , _snake_case , _snake_case : Dict = get_key_and_tensorstore_dict(
A__ , A__ , A__ )
if curr_real_layer_name in all_layers:
_snake_case : Union[str, Any] = content
else:
_snake_case : Optional[int] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_snake_case : Union[str, Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_snake_case : List[Any] = torch.tensor(A__ )
_snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_snake_case , _snake_case : Union[str, Any] = rename_base_flax_keys(tuple(key.split("""/""" ) ) , A__ )
_snake_case : Tuple = """/""".join(A__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_snake_case : str = os.path.join(
A__ , weights_name.replace(""".bin""" , f'''-{len(A__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(A__ , A__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
_snake_case : int = {}
_snake_case : Union[str, Any] = 0
_snake_case : Tuple = raw_weights.to(getattr(A__ , A__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_snake_case : Dict = os.path.join(A__ , weights_name.replace(""".bin""" , f'''-{len(A__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(A__ , A__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(A__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_snake_case : Optional[Any] = {}
_snake_case : List[str] = {}
for idx, shard in enumerate(A__ ):
_snake_case : str = weights_name.replace(
""".bin""" , f'''-{idx+1:05d}-of-{len(A__ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
_snake_case : int = os.path.join(A__ , weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(A__ , os.path.join(A__ , A__ ) )
_snake_case : Union[str, Any] = shard
for key in shard:
_snake_case : Tuple = shard_file
# Add the metadata
_snake_case : int = {"""total_size""": total_size}
_snake_case : str = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(A__ , A__ ) , """w""" , encoding="""utf-8""" ) as f:
_snake_case : int = json.dumps(A__ , indent=2 , sort_keys=A__ ) + """\n"""
f.write(A__ )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
UpperCAmelCase_ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCAmelCase ( ) -> Tuple:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_snake_case : Any = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
_snake_case : Any = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
_snake_case : Union[str, Any] = TaTokenizer.from_pretrained("""t5-small""" )
_snake_case : Dict = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
_snake_case : str = tokenizer(A__ , return_tensors="""pt""" ).input_ids
_snake_case : Dict = model.generate(A__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
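# The sharding loop above flushes a block whenever adding the next weight would
# push it past max_shard_size. The same accounting in isolation (byte sizes are
# illustrative, not transformers' dtype_byte_size):
def plan_shards(weight_sizes, max_shard_size):
    shards, current, current_size = [], [], 0
    for name, size in weight_sizes:
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)
    return shards

print(plan_shards([("a", 6), ("b", 5), ("c", 4)], max_shard_size=10))  # [['a'], ['b', 'c']]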
| 519
| 0
|
"""simple docstring"""
from collections import deque
class lowercase__ :
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> None:
'''simple docstring'''
UpperCAmelCase_ = process_name # process name
UpperCAmelCase_ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCAmelCase_ = arrival_time
UpperCAmelCase_ = burst_time # remaining burst time
UpperCAmelCase_ = 0 # total time of the process wait in ready queue
UpperCAmelCase_ = 0 # time from arrival time to completion time
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : list[int] , _UpperCAmelCase : deque[Process] , _UpperCAmelCase : int , ) -> None:
'''simple docstring'''
UpperCAmelCase_ = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCAmelCase_ = time_slices
# unfinished process is in this ready_queue
UpperCAmelCase_ = queue
# current time
UpperCAmelCase_ = current_time
# finished process is in this sequence queue
UpperCAmelCase_ = deque()
def lowercase__ ( self : Tuple ) -> list[str]:
'''simple docstring'''
UpperCAmelCase_ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def lowercase__ ( self : Tuple , _UpperCAmelCase : list[Process] ) -> list[int]:
'''simple docstring'''
UpperCAmelCase_ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def lowercase__ ( self : List[str] , _UpperCAmelCase : list[Process] ) -> list[int]:
'''simple docstring'''
UpperCAmelCase_ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def lowercase__ ( self : int , _UpperCAmelCase : list[Process] ) -> list[int]:
'''simple docstring'''
UpperCAmelCase_ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def lowercase__ ( self : List[Any] , _UpperCAmelCase : deque[Process] ) -> list[int]:
'''simple docstring'''
return [q.burst_time for q in queue]
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Process ) -> int:
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowercase__ ( self : Dict , _UpperCAmelCase : deque[Process] ) -> deque[Process]:
'''simple docstring'''
UpperCAmelCase_ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
UpperCAmelCase_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCAmelCase_ = 0
# set the process's turnaround time because it is finished
UpperCAmelCase_ = self.current_time - cp.arrival_time
# set the completion time
UpperCAmelCase_ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowercase__ ( self : List[Any] , _UpperCAmelCase : deque[Process] , _UpperCAmelCase : int ) -> tuple[deque[Process], deque[Process]]:
'''simple docstring'''
UpperCAmelCase_ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
UpperCAmelCase_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCAmelCase_ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCAmelCase_ = 0
# set the finish time
UpperCAmelCase_ = self.current_time
# update the process' turnaround time because it is finished
UpperCAmelCase_ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowercase__ ( self : int ) -> deque[Process]:
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
UpperCAmelCase_ , UpperCAmelCase_ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCamelCase = Process("""P1""", 0, 53)
lowerCamelCase = Process("""P2""", 0, 17)
lowerCamelCase = Process("""P3""", 0, 68)
lowerCamelCase = Process("""P4""", 0, 24)
lowerCamelCase = 3
lowerCamelCase = [17, 25]
lowerCamelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
lowerCamelCase = Process("""P1""", 0, 53)
lowerCamelCase = Process("""P2""", 0, 17)
lowerCamelCase = Process("""P3""", 0, 68)
lowerCamelCase = Process("""P4""", 0, 24)
lowerCamelCase = 3
lowerCamelCase = [17, 25]
lowerCamelCase = deque([Pa, Pa, Pa, Pa])
lowerCamelCase = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCamelCase = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}")
    # print sequence of finished processes
    print(f"sequence of finished processes: {mlfq.calculate_sequence_of_finish_queue()}")
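# One round-robin pass in the same spirit as the round_robin method above, with
# plain (name, burst) tuples instead of Process objects (a simplification, not
# the class API):
from collections import deque

def round_robin_pass(ready, time_slice):
    finished, clock = [], 0
    for _ in range(len(ready)):
        name, burst = ready.popleft()
        if burst > time_slice:
            clock += time_slice
            ready.append((name, burst - time_slice))  # not done; requeue the remainder
        else:
            clock += burst
            finished.append(name)  # finished within this slice
    return finished, ready, clock

print(round_robin_pass(deque([("P1", 53), ("P2", 17)]), time_slice=17))
# (['P2'], deque([('P1', 36)]), 34)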
| 82
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
else:
UpperCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
UpperCAmelCase_ = ["key_proj", "value_proj", "query_proj"]
UpperCAmelCase_ = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
UpperCAmelCase_ = key.split("." )
if attributes[0] == "lm_head":
UpperCAmelCase_ = prophet
UpperCAmelCase_ = prophet_old
else:
UpperCAmelCase_ = prophet.prophetnet
UpperCAmelCase_ = prophet_old.model
UpperCAmelCase_ = False
for attribute in attributes:
if attribute in mapping:
UpperCAmelCase_ = mapping[attribute]
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = attribute
elif hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
UpperCAmelCase_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.bias
logger.info(f"""{attribute} is initialized""" )
UpperCAmelCase_ = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase__ , "in_proj_weight" ):
UpperCAmelCase_ = old_model.in_proj_weight.shape[0] // 3
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCAmelCase_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
UpperCAmelCase_ = True
break
if attribute.isdigit():
UpperCAmelCase_ = model[int(lowerCAmelCase__ )]
UpperCAmelCase_ = old_model[int(lowerCAmelCase__ )]
else:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if old_attribute == "":
UpperCAmelCase_ = old_model
else:
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
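# The q/k/v remapping above slices a fused in_proj matrix into thirds. The same
# slicing on plain tensors (shapes are illustrative):
import torch

def split_in_proj(in_proj_weight):
    embed_dim = in_proj_weight.shape[0] // 3
    q = in_proj_weight[:embed_dim, :]
    k = in_proj_weight[embed_dim : 2 * embed_dim, :]
    v = in_proj_weight[2 * embed_dim :, :]
    return q, k, v

q, k, v = split_in_proj(torch.randn(12, 4))
print(q.shape, k.shape, v.shape)  # each torch.Size([4, 4])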
| 82
| 1
|
'''simple docstring'''
def lowerCamelCase ( numerator : int = 1 , digit : int = 1_0_0_0 ) -> int:
    '''simple docstring'''
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 1_0 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
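# A direct way to measure one recurring-cycle length, useful for cross-checking
# the search above (standard remainder-cycle approach; a sketch, not part of
# the solution):
def cycle_length(denominator):
    # factors of 2 and 5 only delay the cycle, so strip them first
    while denominator % 2 == 0:
        denominator //= 2
    while denominator % 5 == 0:
        denominator //= 5
    if denominator == 1:
        return 0
    remainder, steps = 10 % denominator, 1
    while remainder != 1:
        remainder = remainder * 10 % denominator
        steps += 1
    return steps

print(cycle_length(7))  # 6, since 1/7 = 0.(142857)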
| 718
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = BigBirdConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
SCREAMING_SNAKE_CASE__ :Tuple = BigBirdForQuestionAnswering(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE__ :Any = BigBirdForPreTraining(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(UpperCAmelCase__ , UpperCAmelCase__ , is_trivia_qa=UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
UpperCamelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
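# Hedged command-line usage sketch for the converter above (script name and
# all paths are placeholders):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/ckpt \
#     --big_bird_config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output \
#     --is_trivia_qa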
| 320
| 0
|
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class snake_case :
def __init__( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[Any]=7 , UpperCamelCase__ : int=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=9_9 , UpperCamelCase__ : Any=3_6 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[str]=3_7 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=5_1_2 , UpperCamelCase__ : Any=1_6 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Tuple=0.02 , UpperCamelCase__ : Dict=6 , UpperCamelCase__ : str=6 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : str=1_0_0_0 , )-> Any:
'''simple docstring'''
__lowerCAmelCase: str = parent
__lowerCAmelCase: Optional[int] = batch_size
__lowerCAmelCase: Dict = num_channels
__lowerCAmelCase: List[Any] = image_size
__lowerCAmelCase: Optional[Any] = patch_size
__lowerCAmelCase: Tuple = text_seq_length
__lowerCAmelCase: List[Any] = is_training
__lowerCAmelCase: List[str] = use_input_mask
__lowerCAmelCase: Optional[Any] = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: int = hidden_size
__lowerCAmelCase: str = num_hidden_layers
__lowerCAmelCase: Union[str, Any] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Optional[Any] = hidden_act
__lowerCAmelCase: Any = hidden_dropout_prob
__lowerCAmelCase: List[Any] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[Any] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: Optional[int] = type_sequence_label_size
__lowerCAmelCase: List[str] = initializer_range
__lowerCAmelCase: Tuple = coordinate_size
__lowerCAmelCase: str = shape_size
__lowerCAmelCase: Union[str, Any] = num_labels
__lowerCAmelCase: List[str] = num_choices
__lowerCAmelCase: Tuple = scope
__lowerCAmelCase: Any = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase: str = text_seq_length
__lowerCAmelCase: str = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase: int = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Optional[int])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase: Optional[int] = bbox[i, j, 3]
__lowerCAmelCase: Dict = bbox[i, j, 1]
__lowerCAmelCase: str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase: Dict = bbox[i, j, 2]
__lowerCAmelCase: Tuple = bbox[i, j, 0]
__lowerCAmelCase: Union[str, Any] = t
__lowerCAmelCase: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCAmelCase: int = None
if self.use_input_mask:
__lowerCAmelCase: Union[str, Any] = random_attention_mask([self.batch_size, self.text_seq_length])
__lowerCAmelCase: List[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
__lowerCAmelCase: Any = None
__lowerCAmelCase: List[Any] = None
if self.use_labels:
__lowerCAmelCase: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
__lowerCAmelCase: Any = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any)-> int:
'''simple docstring'''
__lowerCAmelCase: List[Any] = LayoutLMvaModel(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
# text + image
__lowerCAmelCase: List[Any] = model(UpperCamelCase__ , pixel_values=UpperCamelCase__)
__lowerCAmelCase: List[Any] = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__)
__lowerCAmelCase: List[Any] = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , token_type_ids=UpperCamelCase__)
__lowerCAmelCase: List[str] = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
__lowerCAmelCase: Dict = model(UpperCamelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
__lowerCAmelCase: Optional[int] = model(pixel_values=UpperCamelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = self.num_labels
__lowerCAmelCase: Union[str, Any] = LayoutLMvaForSequenceClassification(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Tuple = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase_ ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str)-> str:
'''simple docstring'''
__lowerCAmelCase: Dict = self.num_labels
__lowerCAmelCase: Optional[Any] = LayoutLMvaForTokenClassification(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Optional[Any] = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def lowercase_ ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: List[str] = LayoutLMvaForQuestionAnswering(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Dict = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( __snake_case, __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : str = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowercase_ ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple)-> Any:
'''simple docstring'''
return True
def lowercase_ ( self : int)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: List[str] = LayoutLMvaModelTester(self)
__lowerCAmelCase: str = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7)
def lowercase_ ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=False)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: List[str] = copy.deepcopy(UpperCamelCase__)
if model_class in get_values(UpperCamelCase__):
__lowerCAmelCase: Union[str, Any] = {
k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
if isinstance(UpperCamelCase__ , torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase__):
__lowerCAmelCase: Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__)
elif model_class in get_values(UpperCamelCase__):
__lowerCAmelCase: int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__)
__lowerCAmelCase: Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__)
elif model_class in [
*get_values(UpperCamelCase__),
]:
__lowerCAmelCase: List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__)
elif model_class in [
*get_values(UpperCamelCase__),
]:
__lowerCAmelCase: List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCamelCase__ , )
return inputs_dict
def lowercase_ ( self : Dict)-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[Any])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__)
def lowercase_ ( self : Tuple)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: List[str] = type
self.model_tester.create_and_check_model(*UpperCamelCase__)
def lowercase_ ( self : str)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__)
def lowercase_ ( self : Tuple)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__)
def lowercase_ ( self : List[Any])-> Dict:
'''simple docstring'''
__lowerCAmelCase: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__)
@slow
def lowercase_ ( self : Union[str, Any])-> List[str]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: int = LayoutLMvaModel.from_pretrained(UpperCamelCase__)
self.assertIsNotNone(UpperCamelCase__)
def a__ ( ) -> Union[str, Any]:
__lowerCAmelCase: str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class snake_case ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Union[str, Any])-> List[Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__) if is_vision_available() else None
@slow
def lowercase_ ( self : Union[str, Any])-> int:
'''simple docstring'''
__lowerCAmelCase: Any = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(UpperCamelCase__)
__lowerCAmelCase: Dict = self.default_image_processor
__lowerCAmelCase: List[Any] = prepare_img()
__lowerCAmelCase: Tuple = image_processor(images=UpperCamelCase__ , return_tensors="pt").pixel_values.to(UpperCamelCase__)
__lowerCAmelCase: List[Any] = torch.tensor([[1, 2]])
__lowerCAmelCase: Tuple = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
# forward pass
__lowerCAmelCase: Union[str, Any] = model(
input_ids=input_ids.to(UpperCamelCase__) , bbox=bbox.to(UpperCamelCase__) , pixel_values=pixel_values.to(UpperCamelCase__) , )
# verify the logits
__lowerCAmelCase: int = torch.Size((1, 1_9_9, 7_6_8))
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__)
__lowerCAmelCase: str = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(UpperCamelCase__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4))
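# The nested loop in prepare_config_and_inputs legalizes random boxes by
# swapping coordinates so that x0 <= x1 and y0 <= y1. The same fix, vectorized
# (a sketch on plain tensors):
import torch

def legalize_bbox(bbox):
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)

print(legalize_bbox(torch.tensor([[5, 7, 2, 3]])))  # tensor([[2, 3, 5, 7]])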
| 346
|
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
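# Sanity checks for the helpers above, using known Project Euler 47 values:
assert unique_prime_factors(644) == {2, 7, 23}  # 644 = 2^2 * 7 * 23
assert run(3)[0] == 644  # 644, 645, 646 is the first run of three such numbers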
| 346
| 1
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_UpperCamelCase : Optional[Any] =logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
def _lowerCamelCase ( self , _snake_case ):
"""simple docstring"""
if isinstance(_snake_case , _snake_case ):
__lowerCamelCase = [label.strip() for label in labels.split(''',''' ) if label.strip()]
return labels
def __call__( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if len(_snake_case ) == 0 or len(_snake_case ) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
'''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
).format(_snake_case ) )
if isinstance(_snake_case , _snake_case ):
__lowerCamelCase = [sequences]
__lowerCamelCase = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(_snake_case )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCamelCase )
class _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
def __init__( self , _snake_case=ZeroShotClassificationArgumentHandler() , *_snake_case , **_snake_case ):
"""simple docstring"""
__lowerCamelCase = args_parser
super().__init__(*_snake_case , **_snake_case )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def _lowerCamelCase ( self , _snake_case , _snake_case=True , _snake_case=True , _snake_case=TruncationStrategy.ONLY_FIRST , **_snake_case ):
"""simple docstring"""
__lowerCamelCase = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
__lowerCamelCase = self.tokenizer.eos_token
try:
__lowerCamelCase = self.tokenizer(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , truncation=_snake_case , )
except Exception as e:
if "too short" in str(_snake_case ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__lowerCamelCase = self.tokenizer(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowerCamelCase ( self , **_snake_case ):
"""simple docstring"""
if kwargs.get('''multi_class''' , _snake_case ) is not None:
__lowerCamelCase = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
__lowerCamelCase = {}
if "candidate_labels" in kwargs:
__lowerCamelCase = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
__lowerCamelCase = kwargs['''hypothesis_template''']
__lowerCamelCase = {}
if "multi_label" in kwargs:
__lowerCamelCase = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__( self , _snake_case , *_snake_case , **_snake_case , ):
"""simple docstring"""
if len(_snake_case ) == 0:
pass
elif len(_snake_case ) == 1 and "candidate_labels" not in kwargs:
__lowerCamelCase = args[0]
else:
raise ValueError(F'''Unable to understand extra arguments {args}''' )
return super().__call__(_snake_case , **_snake_case )
def _lowerCamelCase ( self , _snake_case , _snake_case=None , _snake_case="This example is {}." ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self._args_parser(_snake_case , _snake_case , _snake_case )
for i, (candidate_label, sequence_pair) in enumerate(zip(_snake_case , _snake_case ) ):
__lowerCamelCase = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_snake_case ) - 1,
**model_input,
}
def _lowerCamelCase ( self , _snake_case ):
"""simple docstring"""
__lowerCamelCase = inputs['''candidate_label''']
__lowerCamelCase = inputs['''sequence''']
__lowerCamelCase = {k: inputs[k] for k in self.tokenizer.model_input_names}
__lowerCamelCase = self.model(**_snake_case )
__lowerCamelCase = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def _lowerCamelCase ( self , _snake_case , _snake_case=False ):
"""simple docstring"""
__lowerCamelCase = [outputs['''candidate_label'''] for outputs in model_outputs]
__lowerCamelCase = [outputs['''sequence'''] for outputs in model_outputs]
__lowerCamelCase = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
__lowerCamelCase = logits.shape[0]
__lowerCamelCase = len(_snake_case )
__lowerCamelCase = N // n
__lowerCamelCase = logits.reshape((num_sequences, n, -1) )
if multi_label or len(_snake_case ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__lowerCamelCase = self.entailment_id
__lowerCamelCase = -1 if entailment_id == 0 else 0
__lowerCamelCase = reshaped_outputs[..., [contradiction_id, entailment_id]]
__lowerCamelCase = np.exp(_snake_case ) / np.exp(_snake_case ).sum(-1 , keepdims=_snake_case )
__lowerCamelCase = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__lowerCamelCase = reshaped_outputs[..., self.entailment_id]
__lowerCamelCase = np.exp(_snake_case ) / np.exp(_snake_case ).sum(-1 , keepdims=_snake_case )
__lowerCamelCase = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
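# The multi-label branch above softmaxes entailment vs. contradiction logits
# independently per candidate label. That postprocessing step in isolation
# (a numpy sketch, not the pipeline API):
import numpy as np

def multi_label_scores(entail_contra_logits):
    # shape (num_labels, 2): column 0 = contradiction, column 1 = entailment
    e = np.exp(entail_contra_logits)
    probs = e / e.sum(-1, keepdims=True)
    return probs[..., 1]  # per-label probability of entailment

print(multi_label_scores(np.array([[0.0, 2.0], [1.0, -1.0]])).round(3))  # [0.881 0.119]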
| 575
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase : Tuple ={
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[Any] =[
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : int =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
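# The file above registers its public names lazily: imports resolve on first
# attribute access. The core idea in miniature, using a module-level
# __getattr__ (PEP 562) instead of transformers' _LazyModule (toy mapping):
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(name)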
| 575
| 1
|
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
import check_dummies  # noqa: E402

# Align the path used by check_dummies with the current repository layout
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
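# For context, a minimal sketch of the machinery these tests exercise. The real
# `DummyObject`/`requires_backends` live in diffusers.utils; the simplified versions below
# are illustrative stand-ins only (not the actual diffusers implementation), showing why
# touching a dummy class raises a helpful ImportError instead of the package failing at
# import time:
#
#   class DummyObject(type):
#       # Metaclass that makes any attribute access on the placeholder fail fast.
#       def __getattr__(cls, key):
#           requires_backends(cls, cls._backends)
#
#   def requires_backends(obj, backends):
#       name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
#       raise ImportError(f"{name} requires the following backends: {backends}")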
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: `frames` holds the denoised video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
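# Note: the try/except guard above is the consumer side of the dummy-object machinery
# exercised in test_check_dummies earlier in this dump: when torch or transformers is
# missing, the star-import pulls in autogenerated placeholders that raise an informative
# ImportError on first use.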
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __lowerCAmelCase ) -> float:
if not nums:
raise ValueError("""List is empty""" )
return sum(__lowerCAmelCase ) / len(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_headmasking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMT5ModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2 * n - 1) for n = 0, 1, ..."""
    # Check the type first so that non-integer inputs raise ValueError, not TypeError
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
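    # Expected output (the sequence here starts at n = 0, hence the leading 0):
    #   [0, 1, 6, 15, 28]
    #   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]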
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
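# Note on the integration tests above: the 512 checkpoint swaps in DPMSolverMultistepScheduler,
# a faster solver than the default DDIM, which is presumably why 25 inference steps are used
# instead of 40; the looser 1e-1 tolerance allows for the different sampling trajectory.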
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on a distributed setup that the context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
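    # Example invocation (runner names and token are placeholders, and the script filename
    # below is illustrative since it depends on where this file lives in the repo):
    #   python get_github_runner_status.py --target_runners runner-a,runner-b --token "$GITHUB_TOKEN"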
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b]."""
        if self.flag[idx] is True:
            # push the pending assignment down before recursing
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum of [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
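    # With lazy propagation, both `update` and `query` touch O(log n) nodes per call;
    # a plain segment tree would need up to O(n log n) work for a worst-case range assignment,
    # since each of the n covered leaves would be updated individually.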
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape where needed."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
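# A minimal usage sketch (illustrative only; `MyFlaxModel`, `config` and the checkpoint
# filename are hypothetical placeholders):
#
#   import torch
#
#   pt_state_dict = torch.load("pytorch_model.bin", map_location="cpu")
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, MyFlaxModel(config))
#
# The returned nested dict can then be passed as the `params` argument of the Flax
# module's `apply` method.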
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : str = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts using preprocessing and the raw SP tokenizer (faster, reduced functionality)."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids directly with the SP model, bypassing the standard decoding pipeline."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000 (Project Euler 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
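    # For reference, the widely published answer to Project Euler problem 48 is "9110846700".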
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)  # fixed: pad region of the second example
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
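# A hedged usage sketch (not part of the test suite): how the feature extractor under
# test is used directly. Constructor values mirror the tester defaults above; the
# random waveforms are illustrative stand-ins for real 16 kHz audio.
if __name__ == "__main__":
    extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    raw_speech = [np.random.randn(800).astype(np.float32), np.random.randn(1200).astype(np.float32)]
    batch = extractor(raw_speech, sampling_rate=16000, padding=True, return_tensors="np")
    print(batch.input_values.shape)    # (2, 1200): padded to the longest example
    print(batch.attention_mask.shape)  # (2, 1200): zeros mark the padded positions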
| 40
| 1
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
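# A hedged usage sketch (not part of the test suite) mirroring the integration test
# above: building the pipeline by hand and sampling deterministically.
if __name__ == "__main__":
    pipe = DDIMPipeline(unet=UNet2DModel.from_pretrained("google/ddpm-cifar10-32"), scheduler=DDIMScheduler())
    pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    # With a fixed generator, eta=0.0 makes DDIM sampling deterministic.
    image = pipe(generator=torch.manual_seed(0), eta=0.0, num_inference_steps=50).images[0]
    image.save("ddim_sample.png")  # default output_type is PIL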
| 704
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase : List[Any] =logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base class for speech feature extractors that pad/truncate sequence inputs."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self, processed_features, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
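# A hedged usage sketch (not part of the module): because `pad` accepts a list of
# per-example dicts, it works directly as a DataLoader `collate_fn`. The Wav2Vec2
# extractor and the lengths below are illustrative.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    dataset = [{"input_values": np.random.randn(n).astype(np.float32)} for n in (800, 1000, 1200)]
    loader = DataLoader(
        dataset,
        batch_size=3,
        # `pad` converts the list of dicts into a dict of lists and pads to the longest example.
        collate_fn=lambda features: extractor.pad(features, padding=True, return_tensors="pt"),
    )
    batch = next(iter(loader))
    print(batch["input_values"].shape)  # torch.Size([3, 1200])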
| 661
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu",
        resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02,
        summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True,
        summary_first_dropout=0.1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
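# A hedged usage sketch (not part of the module): instantiating a model from a
# modified configuration; the overridden sizes below are illustrative.
if __name__ == "__main__":
    from transformers import OpenAIGPTModel

    config = OpenAIGPTConfig(n_layer=6, n_head=8, n_embd=512)
    model = OpenAIGPTModel(config)
    print(model.config.num_hidden_layers)  # 6, resolved through the attribute_map alias for n_layer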
| 69
|
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """Returns the start below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoised chain lengths

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
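# A quick sanity check (our addition, hedged): the longest chain for a start below 10
# begins at 9, whose chain 9 -> 28 -> 14 -> 7 -> ... -> 4 -> 2 -> 1 has 20 terms.
assert solution(10) == 9
# Classic Project Euler 14 answer for the default limit (takes a few seconds):
# assert solution() == 837799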
| 69
| 1
|
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Converts `speed` from `unit_from` to `unit_to`, using km/h as the pivot unit."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
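# Worked examples (our addition; results follow from the two charts above):
if __name__ == "__main__":
    print(convert_speed(100, "km/h", "m/s"))  # 27.778
    print(convert_speed(100, "mph", "km/h"))  # 160.934
    print(convert_speed(10, "knot", "m/s"))   # 5.144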
| 315
|
def selection_sort(collection: list) -> list:
    """Sorts `collection` in place by repeatedly selecting the minimum of the unsorted suffix."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
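# Quick checks (our addition): selection sort does O(n^2) comparisons but only O(n)
# swaps, which is why it is occasionally preferred when writes are expensive.
assert selection_sort([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]
assert selection_sort([]) == []
assert selection_sort([3, 3, 1]) == [1, 3, 3]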
| 315
| 1
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4,
        num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False,
            bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 433
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None,
        tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
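# A hedged usage sketch (not part of the module): translating with the fast tokenizer
# above. The checkpoint comes from the vocab map earlier in this file; the model class
# is the standard transformers seq2seq auto class.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM

    tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

    inputs = tokenizer("The weather is nice today.", return_tensors="pt")
    # Force the first generated token to be the target language code.
    generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])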
| 433
| 1
|
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Changes the contrast of a PIL image; level 0 leaves the image unchanged."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
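# A small self-contained check (our addition): level=100 gives factor
# 259*355/(255*159) ~= 2.268, so a uniform gray value of 100 maps to
# int(128 + 2.268*(100 - 128)) == 64.
gray = Image.new("L", (4, 4), color=100)
assert change_contrast(gray, 100).getpixel((0, 0)) == 64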
| 537
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 537
| 1
|
def sylvester(number: int) -> int:
    """Returns the nth term of Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 693
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 693
| 1
|
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
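# A hedged usage sketch (not part of the test suite): the scheduler under test can be
# swapped into a diffusers pipeline via its config. The checkpoint name is illustrative.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    # Keep the checkpoint's scheduler settings, but sample with DPM-Solver SDE.
    pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
    image = pipe("a photo of an astronaut riding a horse", num_inference_steps=25).images[0]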
| 720
|
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow logging (this is what the otherwise-unused os import is for)
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 118
| 0
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image processor into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # run the tokenizer on the text first ...
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # ... then add pixel_values + pixel_mask from the image processor to the same encoding
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
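
# Usage sketch (illustrative only -- this module uses relative imports, so import the class
# through the `transformers` package; the checkpoint name below is an assumed example):
#
#     from PIL import Image
#     from transformers import ViltProcessor
#
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     inputs = processor(images=Image.open("photo.png"), text="How many cats are there?", return_tensors="pt")
#     # `inputs` combines input_ids/attention_mask from the tokenizer with
#     # pixel_values/pixel_mask from the image processor.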
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # one extra entry for the start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(self, preprocessor: "FeatureExtractionMixin", batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32) -> Mapping[str, Any]:
        # generate dummy images and run them through the preprocessor to build ONNX export inputs
        input_images = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_images, return_tensors=framework))
        return inputs
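
# Example (illustrative): a reduced-size configuration for quick experiments. The attribute
# map above lets generic code read `hidden_size` even though the stored field is `n_embd`:
#
#     from transformers import ImageGPTConfig
#
#     config = ImageGPTConfig(n_embd=64, n_layer=2, n_head=2)
#     assert config.hidden_size == config.n_embd == 64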
"""Fine-tuning TAPEX (BART with a table-aware tokenizer) on the TabFact table-based fact verification task."""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Turn a '#'-delimited table string into a pandas DataFrame (first row = header)."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
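
# Example invocation (sketch; the script filename, checkpoint, and output path below are
# assumed placeholders -- adjust them to your setup):
#
#   python run_tabfact_with_tapex.py \
#     --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact \
#     --do_train --do_eval \
#     --per_device_train_batch_size 8 \
#     --learning_rate 3e-5 \
#     --output_dir /tmp/tapex_tabfact \
#     --overwrite_output_dir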
"""Project Euler problem 6: https://projecteuler.net/problem=6"""


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares
    of the first `n` natural numbers, using the closed-form identities.

    >>> solution(10)
    2640
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
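
# Cross-check (illustrative): the closed form agrees with a direct O(n) computation. For
# n = 10 the square of the sum is 55**2 = 3025, the sum of the squares is 385, and
# 3025 - 385 = 2640:
#
#     assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)) == 2640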
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        expected_encoding = UpperCAmelCase__
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None  # first node of the list
        self.tail = None  # last node, whose `next` points back to the head

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
__lowerCAmelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        expected_words = __lowerCAmelCase  # capture the words literal before the boxes literal below reuses the name
__lowerCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _A )
self.assertListEqual(encoding.boxes , _A )
# with apply_ocr = False
__lowerCAmelCase = LayoutLMvaImageProcessor(apply_ocr=_A )
__lowerCAmelCase = image_processing(_A , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 552
| 0
|
'''simple docstring'''
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically num //= 8, discarding any remainder
    # Casting to `int` removes the trailing '.0' left on `octal` by the float maths.
    return f"0o{int(octal)}"
def main():
print('\n2 in octal is:')
print(decimal_to_octal(2)) # = 2
print('\n8 in octal is:')
print(decimal_to_octal(8)) # = 10
print('\n65 in octal is:')
print(decimal_to_octal(65)) # = 101
print('\n216 in octal is:')
print(decimal_to_octal(216)) # = 330
print('\n512 in octal is:')
print(decimal_to_octal(512)) # = 1000
print('\n')
if __name__ == "__main__":
main()
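    # Quick sanity checks (a sketch) mirroring the expected values printed above.
    assert decimal_to_octal(8) == "0o10"
    assert decimal_to_octal(65) == "0o101"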
| 3
|
"""Constant histogram stretching of a grayscale image."""

import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 3
| 1
|
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements of `array`."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    # Sliding window: start from the sum of the first k elements, then slide one
    # position at a time, adding the entering element and dropping the leaving one.
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 703
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
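# Minimal usage sketch (assumes `transformers` is installed and exposes the
# reconstructed `UniSpeechSatConfig` above):
# config = UniSpeechSatConfig()
# config.inputs_to_logits_ratio  # product of the conv strides: 5*2*2*2*2*2*2 = 320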
| 46
| 0
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the probability with the fitted logistic regression weights
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
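    # Optional sanity check (a sketch): threshold the fitted probabilities at 0.5
    # and measure accuracy on the training data itself.
    train_preds = (predict_prob(x) >= 0.5).astype(int)
    print("training accuracy:", (train_preds == y).mean())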
| 431
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowercase_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
        trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
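        # Repeat the evaluation with eval_accumulation_steps=2 so the host-side
        # accumulation path (moving prediction tensors to CPU every 2 steps) is
        # exercised as well.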
        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = None
| 235
| 0
|
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as 'mm-dd-yyyy' or 'mm/dd/yyyy'."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
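    # Spot-check (a sketch): 01-31-2010 fell on a Sunday.
    assert zeller("01-31-2010") == "Your date 01-31-2010, is a Sunday!"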
    parser = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
    args = parser.parse_args()
zeller(args.date_input)
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
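# Map of submodule name -> public symbols. `_LazyModule` at the bottom of this
# file defers the heavy torch/flax imports until one of these attributes is
# first accessed.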
lowerCamelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 588
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
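# Usage sketch (hypothetical paths; assumes the reconstructed classes above):
# config = YolosConfig(num_detection_tokens=100)
# config.save_pretrained("./yolos-small-config")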
| 685
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)
if current_list != new_list:
if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 571
|
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # Compare the trailing digits of the number and of its square, one by one.
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
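    # Examples (a sketch): 76**2 = 5776 ends in 76, so 76 is automorphic;
    # 7**2 = 49 does not end in 7.
    assert is_automorphic_number(76)
    assert not is_automorphic_number(7)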
| 571
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
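# Usage sketch (assumes a trained `UNet2DModel` and a compatible scheduler;
# the class name `CustomLocalPipeline` is reconstructed from the returned
# "This is a local test" marker and may differ in the original fixture):
# pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
# images, message = pipe(batch_size=1, num_inference_steps=50, output_type="pil")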
| 247
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 247
| 1
|
def rank_of_matrix(matrix):
    """Find the rank of a matrix via Gaussian elimination (modifies `matrix` in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
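    # Example (a sketch): the rows are linearly dependent, so the rank is 1.
    assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1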
| 1
|
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing exactly one digit from `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("doctest").testmod()
| 1
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support"
        ' for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons'
        " will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 173
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None
def __enter__( self ) -> List[Any]:
'''simple docstring'''
self.acquire()
return self
def __exit__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
self.release()
return None
def __del__( self ) -> List[str]:
'''simple docstring'''
        self.release(force=True)
return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock

elif fcntl:
    FileLock = UnixFileLock

else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
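# Usage sketch (a minimal example of the API defined above; the lock-file path
# is hypothetical):
# lock = FileLock("resource.txt.lock", timeout=5)
# with lock:
#     ...  # critical section; the lock is released on exit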
| 173
| 1
|
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache


import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepares a batch of random images as PIL images, NumPy arrays, or PyTorch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            # All images share the maximum resolution.
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            # Each image gets a random resolution between min and max.
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
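

# --- Illustrative usage sketch (assumption, not part of the original tests). ---
# The tester above can emit the same kind of random batch in three container
# types; the `parent=None` below is a hypothetical stand-in for a test case.
#
#     tester = ChineseCLIPImageProcessingTester(parent=None)
#     pil_batch = tester.prepare_inputs()                 # list of PIL.Image
#     np_batch  = tester.prepare_inputs(numpify=True)     # list of np.ndarray, (C, H, W)
#     pt_batch  = tester.prepare_inputs(torchify=True)    # list of torch.Tensor, (C, H, W)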


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the 4-channel input is converted to 3 RGB channels
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
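

# --- Illustrative note (assumption, not part of the original tests). ---
# Both test classes are ordinary unittest.TestCase subclasses, so they can be
# collected by the standard runner, e.g. (module path is hypothetical):
#
#     python -m unittest path.to.this_test_module -v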